diff --git a/.github/actions/collect-testing-logs/action.yml b/.github/actions/collect-testing-logs/action.yml
index 952433ac97..c8ba983257 100644
--- a/.github/actions/collect-testing-logs/action.yml
+++ b/.github/actions/collect-testing-logs/action.yml
@@ -21,16 +21,6 @@ runs:
~/.cache/telepresence/logs \
/tmp/test-logs/telepresence-logs
fi
- if [ -f docker/container.txt ] && docker inspect $(cat docker/container.txt) >/dev/null; then
- docker exec $(cat docker/container.txt) /bin/bash -c 'mkdir -p /tmp/build-container-logs && (cp -r /tmp/*.txt /tmp/*.json /tmp/*.log /tmp/*.yaml /tmp/build-container-logs || true) && ((test -d ~/.cache/telepresence/logs && cp -r ~/.cache/telepresence/logs /tmp/build-container-logs/telepresence-logs) || true)'
- docker cp $(cat docker/container.txt):/tmp/build-container-logs /tmp/test-logs/build-container-logs || true
- fi
- - name: "Gather Coverage"
- shell: bash
- run: |
- if [ -f docker/container.txt ] && docker inspect $(cat docker/container.txt) >/dev/null; then
- docker cp $(cat docker/container.txt):/tmp/cov_html /tmp/cov_html || true
- fi
- name: "Gather Pod Logs"
shell: bash
run: |
diff --git a/.github/actions/execute-generate/action.yml b/.github/actions/execute-generate/action.yml
deleted file mode 100644
index 7f4724085d..0000000000
--- a/.github/actions/execute-generate/action.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-name: generate
-description: "execute make generate twice, ensure git not dirty"
-
-runs:
- using: "composite"
- steps:
- - name: "install bsdtar (libarchive-tools)"
- shell: bash
- run: |
- sudo apt-get update -y
- sudo apt-get install -y libarchive-tools
- - name: Install Deps
- uses: ./.github/actions/setup-deps
- - name: "'make generate'"
- shell: bash
- run: |
- make generate
- - uses: ./.github/actions/git-dirty-check
- name: "Check git not dirty from 'make generate'"
- - name: "'make generate' (again!)"
- shell: bash
- run: |
- make generate
- - uses: ./.github/actions/git-dirty-check
- name: "Check git not dirty from 'make generate' (again!)"
diff --git a/.github/actions/execute-go-tests/action.yml b/.github/actions/execute-go-tests/action.yml
deleted file mode 100644
index 81b6012e39..0000000000
--- a/.github/actions/execute-go-tests/action.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-name: gotest
-description: "execute go tests based upon parameters"
-
-runs:
- using: "composite"
- steps:
- - run: echo "PREFIX=gotests" >> $GITHUB_ENV
- shell: bash
- name: Set Unique Test prefix
- - name: Install Deps
- uses: ./.github/actions/setup-deps
- - uses: BSFishy/pip-action@v1
- with:
- packages: |
- awscli
- packaging
- name: "Install Python requirements with pip"
- - run: |
- go install gotest.tools/gotestsum@latest
- shell: bash
- name: "Install gotestsum (latest)"
- - name: make gotest
- shell: bash
- run: |
- export DEV_KUBE_NO_PVC=yes
- export KAT_REQ_LIMIT=900
- export TEST_XML_DIR=/tmp/test-logs/xml/
- export GOTEST_ARGS='-timeout 30m'
- mkdir -p ${TEST_XML_DIR}
- make gotest
- - uses: ./.github/actions/git-dirty-check
- name: "Check git not dirty from testing"
diff --git a/.github/actions/execute-job-image/action.yml b/.github/actions/execute-job-image/action.yml
deleted file mode 100644
index 87998fc902..0000000000
--- a/.github/actions/execute-job-image/action.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-name: job-image
-description: "push emissary image to dev"
-
-runs:
- using: "composite"
- steps:
- - name: Install Deps
- uses: ./.github/actions/setup-deps
- - name: "Docker Login"
- uses: docker/login-action@v1
- with:
- username: ${{ env.DOCKER_USERNAME }}
- password: ${{ env.DOCKER_PASSWORD }}
- - name: "make push"
- shell: bash
- run: |
- make push
- - name: "make push-dev"
- shell: bash
- run: |
- make push-dev
- - uses: ./.github/actions/git-dirty-check
- name: "Check git not dirty (from make push + make push-dev)"
diff --git a/.github/actions/execute-lint-test/action.yml b/.github/actions/execute-lint-test/action.yml
deleted file mode 100644
index c94fa9e4b6..0000000000
--- a/.github/actions/execute-lint-test/action.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-name: lint-test-action
-description: "Execute emissary lint tests"
-
-runs:
- using: "composite"
- steps:
- - name: Install Deps
- uses: ./.github/actions/setup-deps
- - name: make lint
- shell: bash
- run: |
- make lint
diff --git a/.github/actions/execute-pytest-unit/action.yml b/.github/actions/execute-pytest-unit/action.yml
deleted file mode 100644
index e485be5b4c..0000000000
--- a/.github/actions/execute-pytest-unit/action.yml
+++ /dev/null
@@ -1,42 +0,0 @@
-name: pytest
-description: "execute emissary pytests"
-
-runs:
- using: "composite"
- steps:
- - run: echo "PREFIX=pytest-${{ matrix.test }}" >> $GITHUB_ENV
- name: Set Unique Test prefix ${{ env.PREFIX }}
- shell: bash
- - run: |
- sudo sysctl -w fs.file-max=1600000
- sudo sysctl -w fs.inotify.max_user_instances=4096
- shell: bash
- name: "Configure system file and inotify maximums (1600000/4096)"
- - name: Install Deps
- uses: ./.github/actions/setup-deps
- - uses: BSFishy/pip-action@v1
- with:
- packages: |
- awscli
- packaging
- name: "Install Python requirements with pip"
- - name: "Docker Login"
- uses: docker/login-action@v1
- with:
- username: ${{ env.DOCKER_USERNAME }}
- password: ${{ env.DOCKER_PASSWORD }}
- - name: make pytest-${{ matrix.test }}
- uses: nick-invision/retry@v2.4.0
- with:
- max_attempts: 3
- timeout_minutes: 20
- command: |
- export DEV_KUBE_NO_PVC=yes
- export KAT_REQ_LIMIT=900
- export TEST_XML_DIR=/tmp/test-logs/xml/
- export DEV_KUBECONFIG=~/.kube/config
- export DEV_REGISTRY=${{ env.DEV_REGISTRY }}
- mkdir -p ${TEST_XML_DIR}
- make pytest-${{ matrix.test }}
- - uses: ./.github/actions/git-dirty-check
- name: "Check git not dirty from testing"
diff --git a/.github/actions/execute-pytests/action.yml b/.github/actions/execute-pytests/action.yml
deleted file mode 100644
index 2e623bf37d..0000000000
--- a/.github/actions/execute-pytests/action.yml
+++ /dev/null
@@ -1,49 +0,0 @@
-name: pytest
-description: "execute emissary pytests"
-
-runs:
- using: "composite"
- steps:
- - run: echo "PREFIX=pytest-${{ matrix.test }}" >> $GITHUB_ENV
- name: Set Unique Test prefix ${{ env.PREFIX }}
- shell: bash
- - run: echo "USE_LOCAL_K3S_CLUSTER=1" >> $GITHUB_ENV
- shell: bash
- name: "Set USE_LOCAL_K3S_CLUSTER=1"
- - run: |
- sudo sysctl -w fs.file-max=1600000
- sudo sysctl -w fs.inotify.max_user_instances=4096
- shell: bash
- name: "Configure system file and inotify maximums (1600000/4096)"
- - name: Install Deps
- uses: ./.github/actions/setup-deps
- - uses: BSFishy/pip-action@v1
- with:
- packages: |
- awscli
- packaging
- name: "Install Python requirements with pip"
- - name: "Install and start k3d"
- shell: bash
- run: |
- make ci/setup-k3d K3D_CLUSTER_NAME=amb-ci
- - name: "Docker Login"
- uses: docker/login-action@v1
- with:
- username: ${{ env.DOCKER_USERNAME }}
- password: ${{ env.DOCKER_PASSWORD }}
- - name: make pytest-${{ matrix.test }}
- uses: nick-invision/retry@v2.4.0
- with:
- max_attempts: 3
- timeout_minutes: 20
- command: |
- export DEV_KUBE_NO_PVC=yes
- export KAT_REQ_LIMIT=900
- export TEST_XML_DIR=/tmp/test-logs/xml/
- export DEV_KUBECONFIG=~/.kube/config
- export DEV_REGISTRY=${{ env.DEV_REGISTRY }}
- mkdir -p ${TEST_XML_DIR}
- make pytest-${{ matrix.test }}
- - uses: ./.github/actions/git-dirty-check
- name: "Check git not dirty from testing"
diff --git a/.github/actions/setup-deps/action.yml b/.github/actions/setup-deps/action.yml
index 9622408c48..c3cd3fd13c 100644
--- a/.github/actions/setup-deps/action.yml
+++ b/.github/actions/setup-deps/action.yml
@@ -4,6 +4,17 @@ description: "Install Go and Python"
runs:
using: "composite"
steps:
+ - name: "Install bsdtar (libarchive-tools)"
+ shell: bash
+ run: |
+ sudo apt-get update -y
+ sudo apt-get install -y libarchive-tools
+ - name: "Install Python requirements with pip"
+ uses: BSFishy/pip-action@v1
+ with:
+ packages: |
+ awscli
+ packaging
# Go: Do this first because `Makefile` checks that the `go` version is correct.
- name: "Get Go version from builder container"
id: step-detect-go
diff --git a/.github/workflows/execute-tests-and-promote.yml b/.github/workflows/execute-tests-and-promote.yml
index 6fa5b855b5..8d68fd365c 100644
--- a/.github/workflows/execute-tests-and-promote.yml
+++ b/.github/workflows/execute-tests-and-promote.yml
@@ -8,26 +8,49 @@ on:
pull_request: {}
jobs:
- lint-test:
- name: "lint-test"
+
+ lint: ########################################################################
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- - uses: ./.github/actions/execute-lint-test
- name: "execute lint-test action"
- generate:
- name: "generate"
+ - name: Install Deps
+ uses: ./.github/actions/setup-deps
+ - shell: bash
+ run: |
+ make lint
+
+ generate: ####################################################################
runs-on: ubuntu-latest
+ env:
+ # Set DEV_REGISTRY to match BASE_REGISTRY.
+ DEV_REGISTRY: "docker.io/emissaryingress"
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- - uses: ./.github/actions/execute-generate
- name: "execute generate action"
- check-envoy-version:
- name: "check-envoy-version"
+ - name: Install Deps
+ uses: ./.github/actions/setup-deps
+ - name: "Docker Login"
+ uses: docker/login-action@v1
+ with:
+ username: ${{ secrets.GH_DOCKER_RELEASE_USERNAME }}
+ password: ${{ secrets.GH_DOCKER_RELEASE_TOKEN }}
+ - name: "'make generate'"
+ shell: bash
+ run: |
+ make generate
+ - uses: ./.github/actions/git-dirty-check
+ name: "Check Git not dirty from 'make generate'"
+ - name: "'make generate' (again!)"
+ shell: bash
+ run: |
+ make generate
+ - uses: ./.github/actions/git-dirty-check
+ name: "Check Git not dirty from 'make generate' (again!)"
+
+ check-envoy-version: #########################################################
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
@@ -36,25 +59,37 @@ jobs:
- name: Install Deps
uses: ./.github/actions/setup-deps
- run: make check-envoy-version
- go-tests:
+
+ # Tests ######################################################################
+ check-gotest:
runs-on: ubuntu-latest
- name: gotest
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- - uses: ./.github/actions/execute-go-tests
- name: "execute go-tests action"
- # collect logs
+ - name: Install Deps
+ uses: ./.github/actions/setup-deps
+ - name: "Docker Login"
+ uses: docker/login-action@v1
+ with:
+ username: ${{ secrets.GH_DOCKER_BUILD_USERNAME }}
+ password: ${{ secrets.GH_DOCKER_BUILD_TOKEN }}
+ - name: make gotest
+ shell: bash
+ run: |
+ export PREFIX=gotests
+ export DEV_KUBE_NO_PVC=yes
+ export KAT_REQ_LIMIT=900
+ export TEST_XML_DIR=/tmp/test-logs/xml/
+ mkdir -p ${TEST_XML_DIR}
+ make gotest
+ - uses: ./.github/actions/git-dirty-check
+ name: "Check git not dirty from testing"
- uses: ./.github/actions/collect-testing-logs
if: always()
name: "Collect testing logs"
- pytests:
+ check-pytest:
runs-on: ubuntu-latest
- env:
- DEV_REGISTRY: ${{ secrets.DEV_REGISTRY }}
- DOCKER_USERNAME: ${{ secrets.GH_DOCKER_BUILD_USERNAME }}
- DOCKER_PASSWORD: ${{ secrets.GH_DOCKER_BUILD_TOKEN }}
strategy:
fail-fast: false
matrix:
@@ -76,20 +111,42 @@ jobs:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- - uses: ./.github/actions/execute-pytests
- name: "execute pytests action"
- # collect logs
+ - name: Install Deps
+ uses: ./.github/actions/setup-deps
+ - name: "Docker Login"
+ uses: docker/login-action@v1
+ with:
+ username: ${{ secrets.GH_DOCKER_BUILD_USERNAME }}
+ password: ${{ secrets.GH_DOCKER_BUILD_TOKEN }}
+ - name: make pytest-${{ matrix.test }}
+ uses: nick-invision/retry@v2.4.0
+ with:
+ max_attempts: 3
+ timeout_minutes: 20
+ command: |
+ export PREFIX=pytest-${{ matrix.test }}
+ export USE_LOCAL_K3S_CLUSTER=1
+ sudo sysctl -w fs.file-max=1600000
+ sudo sysctl -w fs.inotify.max_user_instances=4096
+
+ make ci/setup-k3d K3D_CLUSTER_NAME=amb-ci
+
+ export DEV_KUBE_NO_PVC=yes
+ export KAT_REQ_LIMIT=900
+ export TEST_XML_DIR=/tmp/test-logs/xml/
+ export DEV_KUBECONFIG=~/.kube/config
+ export DEV_REGISTRY=${{ secrets.DEV_REGISTRY }}
+ mkdir -p ${TEST_XML_DIR}
+ make pytest-${{ matrix.test }}
+ - uses: ./.github/actions/git-dirty-check
+ name: "Check git not dirty from testing"
- uses: ./.github/actions/collect-testing-logs
if: always()
name: "Collect testing logs"
- pytest-unit:
+ check-pytest-unit:
# pytest-unit is separate from pytests (above) because we know for certain that no cluster is needed.
# XXX This is pretty much a crock.
runs-on: ubuntu-latest
- env:
- DEV_REGISTRY: ${{ secrets.DEV_REGISTRY }}
- DOCKER_USERNAME: ${{ secrets.GH_DOCKER_BUILD_USERNAME }}
- DOCKER_PASSWORD: ${{ secrets.GH_DOCKER_BUILD_TOKEN }}
strategy:
matrix:
test:
@@ -99,14 +156,37 @@ jobs:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- - uses: ./.github/actions/execute-pytest-unit
- name: "execute pytest-unit action"
- # collect logs
+ - name: Install Deps
+ uses: ./.github/actions/setup-deps
+ - name: "Docker Login"
+ uses: docker/login-action@v1
+ with:
+ username: ${{ secrets.GH_DOCKER_BUILD_USERNAME }}
+ password: ${{ secrets.GH_DOCKER_BUILD_TOKEN }}
+ - name: make pytest-${{ matrix.test }}
+ uses: nick-invision/retry@v2.4.0
+ with:
+ max_attempts: 3
+ timeout_minutes: 20
+ command: |
+ export PREFIX=pytest-${{ matrix.test }}
+ sudo sysctl -w fs.file-max=1600000
+ sudo sysctl -w fs.inotify.max_user_instances=4096
+
+ export DEV_KUBE_NO_PVC=yes
+ export KAT_REQ_LIMIT=900
+ export TEST_XML_DIR=/tmp/test-logs/xml/
+ export DEV_KUBECONFIG=~/.kube/config
+ export DEV_REGISTRY=${{ secrets.DEV_REGISTRY }}
+ mkdir -p ${TEST_XML_DIR}
+ make pytest-${{ matrix.test }}
+ - uses: ./.github/actions/git-dirty-check
+ name: "Check git not dirty from testing"
- uses: ./.github/actions/collect-testing-logs
if: always()
name: "Collect testing logs"
- job-image:
- name: "job-image"
+
+ build: #######################################################################
runs-on: ubuntu-latest
env:
AWS_ACCESS_KEY_ID: ${{ secrets.GH_AWS_ACCESS_KEY_ID }}
@@ -114,26 +194,41 @@ jobs:
AWS_EC2_METADATA_DISABLED: true
AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }}
DEV_REGISTRY: ${{ secrets.DEV_REGISTRY }}
- DOCKER_USERNAME: ${{ secrets.GH_DOCKER_BUILD_USERNAME }}
- DOCKER_PASSWORD: ${{ secrets.GH_DOCKER_BUILD_TOKEN }}
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
ref: ${{ github.event.pull_request.head.sha }}
- - uses: ./.github/actions/execute-job-image
- name: "execute job-image action"
- promote-to-passed:
+ - name: Install Deps
+ uses: ./.github/actions/setup-deps
+ - name: "Docker Login"
+ uses: docker/login-action@v1
+ with:
+ username: ${{ secrets.GH_DOCKER_BUILD_USERNAME }}
+ password: ${{ secrets.GH_DOCKER_BUILD_TOKEN }}
+ - name: "make push"
+ shell: bash
+ run: |
+ make push
+ - name: "make push-dev"
+ shell: bash
+ run: |
+ make push-dev
+ - uses: ./.github/actions/git-dirty-check
+ name: "Check git not dirty (from make push + make push-dev)"
+
+ ##############################################################################
+ pass:
+ name: "job-promote-to-passed" # This is the job name that the branch protection looks for
needs:
- - lint-test
- - job-image
+ - lint
+ - build
- generate
- check-envoy-version
- - go-tests
- - pytests
- - pytest-unit
+ - check-gotest
+ - check-pytest
+ - check-pytest-unit
runs-on: ubuntu-latest
- name: job-promote-to-passed
env:
AWS_ACCESS_KEY_ID: ${{ secrets.GH_AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.GH_AWS_SECRET_ACCESS_KEY }}
diff --git a/.gitignore b/.gitignore
index 9db3d1e20c..7864ef6735 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,8 @@
-# Docker build pin files
-docker/*.pin
+*.img.tar
+*.layer.tar
+.*.stamp
+/docker/kat-server/server.crt
+/docker/kat-server/server.key
*.pyc
*~
@@ -34,15 +37,9 @@ charts/yq
vendor_bootstrap_hack.go
-/tools/sandbox/grpc_auth/docker-compose.yml
-/tools/sandbox/grpc_web/docker-compose.yaml
-/tools/sandbox/http_auth/docker-compose.yml
-
/docker/images.sh
/docker/images.tar
/docker/volume.tar
-/docker/container.txt
-/docker/container.txt.stamp
*.docker
*.docker.stamp
*.docker.tag.*
@@ -108,43 +105,12 @@ ambassador-secrets-deployment.yaml
# Remove the tail of this list when the commit making the change gets
# far enough in to the past.
+# 2022-01-31
+/docker/container.txt
+/docker/container.txt.stamp
# 2020-08-20
/cxx/envoy/
/cxx/envoy-static
/cxx/envoy-build-image.txt
/cxx/envoy-build-container.txt
/cxx/go-control-plane/
-# 2019-10-13
-/build/kat/client/kat_client
-/build/kat/client/teleproxy
-/build/kat/server/kat-server
-# 2019-10-13
-/.docker_port_forward
-# 2019-10-13
-/cluster.yaml
-/kubernaut-claim.txt
-# 2019-10-13
-/ambex
-/kubestatus
-/watt
-/cmd/ambex/ambex
-# 2019-10-11
-/envoy-src/
-/envoy-bin/
-/envoy-build-image.txt
-/envoy-build-container.txt
-# 2019-10-08
-/kat-sandbox/grpc_auth/docker-compose.yml
-/kat-sandbox/grpc_web/*_pb.js
-/kat-sandbox/grpc_web/docker-compose.yaml
-/kat-sandbox/http_auth/docker-compose.yml
-/kat-server-docker-image/kat-server
-/kat/docs/**/_build
-/unused-e2e/**/ambassador-deployment-mounts.yaml
-/unused-e2e/**/ambassador-deployment.yaml
-/unused-e2e/**/kubernaut
-/unused-e2e/*/k8s/ambassador-deployment*.yaml
-# 2019-06-14
-/envoy/
-# 2019-04-05 0388efe75c16540c71223320596accbbe3fe6ac2
-/kat/kat/client
diff --git a/CHANGELOG.md b/CHANGELOG.md
index fcf0eedbc6..ffc284bdae 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -92,14 +92,22 @@ Please see the [Envoy documentation](https://www.envoyproxy.io/docs/envoy/latest
### Emissary-ingress and Ambassador Edge Stack
+- Change: Support for the Envoy V2 API and the `AMBASSADOR_ENVOY_API_VERSION` environment variable
+ have been removed. Only the Envoy V3 API is supported (this has been the default since
+ Emissary-ingress v1.14.0).
+
- Feature: Emissary now supports the metric `ambassador_log_level{label="debug"}` which will be set
to 1 if debug logging is enabled for the running Emissary instance, or to 0 if not. This can help
to be sure that a running production instance was not actually left doing debugging logging, for
example. (Thanks to Fabrice!) ([3906])
-- Change: Support for the Envoy V2 API and the `AMBASSADOR_ENVOY_API_VERSION` environment variable
- have been removed. Only the Envoy V3 API is supported (this has been the default since
- Emissary-ingress v1.14.0).
+- Bugfix: Kubernetes Secrets that should contain TLS certificates are now validated before being
+ accepted for configuration. A Secret that contains an invalid TLS certificate will be logged as an
+ invalid resource. ([3821])
+
+- Change: Emissary will now watch for ConfigMap or Secret resources specified by the
+ `AGENT_CONFIG_RESOURCE_NAME` environment variable in order to allow all components (and not only
+ the Ambassador Agent) to authenticate requests to Ambassador Cloud.
- Feature: The Emissary agent now receive commands to manipulate Rollouts (pause, continue, and
abort are currently supported) via directives and executes them in the cluster. A report is send
@@ -108,6 +116,7 @@ Please see the [Envoy documentation](https://www.envoyproxy.io/docs/envoy/latest
[3906]: https://github.com/emissary-ingress/emissary/issues/3906
[4040]: https://github.com/emissary-ingress/emissary/pull/4040
+[3821]: https://github.com/emissary-ingress/emissary/issues/3821
## [2.1.2] January 25, 2022
[2.1.2]: https://github.com/emissary-ingress/emissary/compare/v2.1.0...v2.1.2
diff --git a/DEVELOPING.md b/DEVELOPING.md
index 9696df593f..d11a489b75 100644
--- a/DEVELOPING.md
+++ b/DEVELOPING.md
@@ -46,7 +46,7 @@ ever find anything missing from this list.
- make
- docker (make sure you can run docker commands as your dev user without sudo)
- bash
- - rsync (with the --info option)
+ - rsync
- golang 1.15
- python 3.8 or 3.9
- kubectl
diff --git a/Makefile b/Makefile
index 8625e546ce..180088df91 100644
--- a/Makefile
+++ b/Makefile
@@ -8,9 +8,12 @@ include build-aux/tools.mk
# Bootstrapping the build env
ifneq ($(MAKECMDGOALS),$(OSS_HOME)/build-aux/go-version.txt)
$(_prelude.go.ensure)
- ifeq ($(shell go env GOPATH),$(shell go env GOROOT))
+ ifneq ($(filter $(shell go env GOROOT),$(subst :, ,$(shell go env GOPATH))),)
$(error Your $$GOPATH (where *your* Go stuff goes) and $$GOROOT (where Go *itself* is installed) are both set to the same directory ($(shell go env GOROOT)); it is remarkable that it has not blown up catastrophically before now)
endif
+ ifneq ($(foreach gopath,$(subst :, ,$(shell go env GOPATH)),$(filter $(gopath)/%,$(CURDIR))),)
+ $(error Your emissary.git checkout is inside of your $$GOPATH ($(shell go env GOPATH)); Emissary-ingress uses Go modules and so GOPATH need not be pointed at it (in a post-modules world, the only role of GOPATH is to store the module download cache); and indeed some of the Kubernetes tools will get confused if GOPATH is pointed at it)
+ endif
VERSION := $(or $(VERSION),$(shell go run ./tools/src/goversion))
$(if $(filter v2.%,$(VERSION)),\
@@ -30,8 +33,25 @@ ifneq ($(MAKECMDGOALS),$(OSS_HOME)/build-aux/go-version.txt)
$(info [make] CHART_VERSION=$(CHART_VERSION))
endif
+ifeq ($(SOURCE_DATE_EPOCH)$(shell git status --porcelain),)
+ SOURCE_DATE_EPOCH := $(shell git log -1 --pretty=%ct)
+endif
+ifneq ($(SOURCE_DATE_EPOCH),)
+ export SOURCE_DATE_EPOCH
+ $(info [make] SOURCE_DATE_EPOCH=$(SOURCE_DATE_EPOCH))
+endif
+
# Everything else...
+# BASE_REGISTRY is where the base images (as in
+# `builder/Dockerfile.base`) get pulled-from/pushed-to. We default
+# this to docker.io/emissaryingress rather than to $(DEV_REGISTRY) or
+# to a .local registry because rebuilding orjson takes so long, we
+# really want to cache it unless the dev really wants to force doing
+# everything locally.
+BASE_REGISTRY ?= docker.io/emissaryingress
+export BASE_REGISTRY
+
NAME ?= emissary
_git_remote_urls := $(shell git remote | xargs -n1 git remote get-url --all)
IS_PRIVATE ?= $(findstring private,$(_git_remote_urls))
@@ -39,6 +59,7 @@ IS_PRIVATE ?= $(findstring private,$(_git_remote_urls))
include $(OSS_HOME)/build-aux/ci.mk
include $(OSS_HOME)/build-aux/check.mk
include $(OSS_HOME)/builder/builder.mk
+include $(OSS_HOME)/build-aux/main.mk
include $(OSS_HOME)/_cxx/envoy.mk
include $(OSS_HOME)/charts/charts.mk
include $(OSS_HOME)/manifests/manifests.mk
diff --git a/build-aux/check.mk b/build-aux/check.mk
index f0afb3d3cc..60b03adae6 100644
--- a/build-aux/check.mk
+++ b/build-aux/check.mk
@@ -1,8 +1,52 @@
include build-aux/tools.mk
-# These are triggered by `python/kat/harness.py:TestImage`.
-test_svcs = auth ratelimit shadow stats
+# Keep this list in-sync with python/tests/integration/manifests.py
+push-pytest-images: docker/emissary.docker.push.remote
+push-pytest-images: docker/test-auth.docker.push.remote
+push-pytest-images: docker/test-shadow.docker.push.remote
+push-pytest-images: docker/test-stats.docker.push.remote
+push-pytest-images: docker/kat-client.docker.push.remote
+push-pytest-images: docker/kat-server.docker.push.remote
+.PHONY: push-pytest-images
+
+# test-{auth,shadow,stats}.docker
+test_svcs = auth shadow stats
$(foreach svc,$(test_svcs),docker/.test-$(svc).docker.stamp): docker/.%.docker.stamp: docker/%/Dockerfile FORCE
docker build --iidfile=$@ $( $@
+
+# kat-server.docker
+docker/kat-server.go.layer.tar: $(tools/ocibuild) $(tools/write-ifchanged) FORCE
+ GOFLAGS=-mod=mod $(tools/ocibuild) layer gobuild ./cmd/kat-server | $(tools/write-ifchanged) $@
+docker/kat-server.certs.layer.tar: $(tools/ocibuild) $(tools/write-ifchanged) docker/kat-server/server.crt docker/kat-server/server.key
+ $(tools/ocibuild) layer dir --prefix=work docker/kat-server | $(tools/write-ifchanged) $@
+docker/kat-server/server.crt: $(tools/testcert-gen)
+ mkdir -p $(@D)
+ $(tools/testcert-gen) --out-cert=$@ --out-key=/dev/null --hosts=kat-server.test.getambassador.io
+docker/kat-server/server.key: $(tools/testcert-gen)
+ mkdir -p $(@D)
+ $(tools/testcert-gen) --out-cert=/dev/null --out-key=$@ --hosts=kat-server.test.getambassador.io
+docker/.kat-server.img.tar.stamp: $(tools/ocibuild) docker/base.img.tar docker/kat-server.go.layer.tar docker/kat-server.certs.layer.tar
+ { $(tools/ocibuild) image build \
+ --base=docker/base.img.tar \
+ --config.Env.append=GRPC_VERBOSITY=debug \
+ --config.Env.append=GRPC_TRACE=tcp,http,api \
+ --config.WorkingDir='/work' \
+ --config.Cmd='kat-server' \
+ --tag=emissary.local/kat-server:latest \
+ <($(tools/ocibuild) layer squash $(filter %.layer.tar,$^)); } > $@
diff --git a/build-aux/generate.mk b/build-aux/generate.mk
index 80db17d6a5..14a3942242 100644
--- a/build-aux/generate.mk
+++ b/build-aux/generate.mk
@@ -41,8 +41,6 @@ generate/precious =
# Whole directories with rules for each individual file in it
generate/files += $(patsubst $(OSS_HOME)/api/%.proto, $(OSS_HOME)/pkg/api/%.pb.go , $(shell find $(OSS_HOME)/api/kat/ -name '*.proto')) $(OSS_HOME)/pkg/api/kat/
generate/files += $(patsubst $(OSS_HOME)/api/%.proto, $(OSS_HOME)/pkg/api/%.pb.go , $(shell find $(OSS_HOME)/api/agent/ -name '*.proto')) $(OSS_HOME)/pkg/api/agent/
-generate/files += $(patsubst $(OSS_HOME)/api/kat/%.proto, $(OSS_HOME)/tools/sandbox/grpc_web/%_pb.js , $(shell find $(OSS_HOME)/api/kat/ -name '*.proto')) # XXX: There are other files in this dir
-generate/files += $(patsubst $(OSS_HOME)/api/kat/%.proto, $(OSS_HOME)/tools/sandbox/grpc_web/%_grpc_web_pb.js , $(shell find $(OSS_HOME)/api/kat/ -name '*.proto')) # XXX: There are other files in this dir
# Whole directories with one rule for the whole directory
generate/files += $(OSS_HOME)/api/envoy/
generate/files += $(OSS_HOME)/api/pb/
@@ -50,7 +48,6 @@ generate/files += $(OSS_HOME)/pkg/api/envoy/
generate/files += $(OSS_HOME)/pkg/api/pb/
generate/files += $(OSS_HOME)/pkg/envoy-control-plane/
# Individual files: Misc
-generate/files += $(OSS_HOME)/docker/test-ratelimit/ratelimit.proto
generate/files += $(OSS_HOME)/OPENSOURCE.md
generate/files += $(OSS_HOME)/LICENSES.md
generate/files += $(OSS_HOME)/builder/requirements.txt
@@ -69,12 +66,8 @@ generate-fast/files += $(OSS_HOME)/python/tests/integration/manifests/crds.yaml
generate-fast/files += $(OSS_HOME)/python/tests/integration/manifests/rbac_cluster_scope.yaml
generate-fast/files += $(OSS_HOME)/python/tests/integration/manifests/rbac_namespace_scope.yaml
# Individual files: Test TLS Certificates
-generate-fast/files += $(OSS_HOME)/builder/server.crt
-generate-fast/files += $(OSS_HOME)/builder/server.key
generate-fast/files += $(OSS_HOME)/docker/test-auth/authsvc.crt
generate-fast/files += $(OSS_HOME)/docker/test-auth/authsvc.key
-generate-fast/files += $(OSS_HOME)/docker/test-ratelimit/ratelimit.crt
-generate-fast/files += $(OSS_HOME)/docker/test-ratelimit/ratelimit.key
generate-fast/files += $(OSS_HOME)/docker/test-shadow/shadowsvc.crt
generate-fast/files += $(OSS_HOME)/docker/test-shadow/shadowsvc.key
generate-fast/files += $(OSS_HOME)/python/tests/selfsigned.py
@@ -93,7 +86,6 @@ _generate:
generate-clean: ## Delete generated sources that get committed to Git
rm -rf $(filter-out $(generate/precious),$(generate/files))
- rm -f $(OSS_HOME)/tools/sandbox/grpc_web/*_pb.js # This corresponds to the "# XXX: There are other files in this dir" comments above
find $(OSS_HOME)/pkg/api/getambassador.io -name 'zz_generated.*.go' -print -delete # generated as a side-effect of other files
.PHONY: generate-clean
@@ -165,32 +157,14 @@ $(OSS_HOME)/pkg/envoy-control-plane: $(OSS_HOME)/_cxx/go-control-plane FORCE
}
cd $(OSS_HOME) && gofmt -w -s ./pkg/envoy-control-plane/
-$(OSS_HOME)/docker/test-ratelimit/ratelimit.proto:
- set -e; { \
- url=https://raw.githubusercontent.com/envoyproxy/ratelimit/v1.3.0/proto/ratelimit/ratelimit.proto; \
- echo "// Downloaded from $$url"; \
- echo; \
- curl --fail -L "$$url"; \
- } > $@
-
#
# `make generate` certificate generation
-$(OSS_HOME)/builder/server.crt: $(tools/testcert-gen)
- $(tools/testcert-gen) --out-cert=$@ --out-key=/dev/null --hosts=kat-server.test.getambassador.io
-$(OSS_HOME)/builder/server.key: $(tools/testcert-gen)
- $(tools/testcert-gen) --out-cert=/dev/null --out-key=$@ --hosts=kat-server.test.getambassador.io
-
$(OSS_HOME)/docker/test-auth/authsvc.crt: $(tools/testcert-gen)
$(tools/testcert-gen) --out-cert=$@ --out-key=/dev/null --hosts=authsvc.datawire.io
$(OSS_HOME)/docker/test-auth/authsvc.key: $(tools/testcert-gen)
$(tools/testcert-gen) --out-cert=/dev/null --out-key=$@ --hosts=authsvc.datawire.io
-$(OSS_HOME)/docker/test-ratelimit/ratelimit.crt: $(tools/testcert-gen)
- $(tools/testcert-gen) --out-cert=$@ --out-key=/dev/null --hosts=ratelimit.datawire.io
-$(OSS_HOME)/docker/test-ratelimit/ratelimit.key: $(tools/testcert-gen)
- $(tools/testcert-gen) --out-cert=/dev/null --out-key=$@ --hosts=ratelimit.datawire.io
-
$(OSS_HOME)/docker/test-shadow/shadowsvc.crt: $(tools/testcert-gen)
$(tools/testcert-gen) --out-cert=$@ --out-key=/dev/null --hosts=demosvc.datawire.io
$(OSS_HOME)/docker/test-shadow/shadowsvc.key: $(tools/testcert-gen)
@@ -267,19 +241,6 @@ $(OSS_HOME)/pkg/api/%.pb.go: $(OSS_HOME)/api/%.proto $(tools/protoc) $(tools/pro
$(call protoc,go,$(OSS_HOME)/pkg/api,\
$(tools/protoc-gen-go))
-proto_options/js += import_style=commonjs
-$(OSS_HOME)/_generate.tmp/%_pb.js: $(OSS_HOME)/api/%.proto $(tools/protoc)
- $(call protoc,js,$(OSS_HOME)/_generate.tmp)
-
-proto_options/grpc-web += import_style=commonjs
-proto_options/grpc-web += mode=grpcwebtext
-$(OSS_HOME)/_generate.tmp/%_grpc_web_pb.js: $(OSS_HOME)/api/%.proto $(tools/protoc) $(tools/protoc-gen-grpc-web)
- $(call protoc,grpc-web,$(OSS_HOME)/_generate.tmp,\
- $(tools/protoc-gen-grpc-web))
-
-$(OSS_HOME)/tools/sandbox/grpc_web/%.js: $(OSS_HOME)/_generate.tmp/kat/%.js
- cp $< $@
-
clean: _generate_clean
_generate_clean:
rm -rf $(OSS_HOME)/_generate.tmp
@@ -425,10 +386,6 @@ $(OSS_HOME)/python/tests/integration/manifests/crds.yaml: $(OSS_HOME)/_generate.
$(OSS_HOME)/pkg/api/getambassador.io/crds.yaml: $(OSS_HOME)/_generate.tmp/crds $(tools/fix-crds)
$(tools/fix-crds) --target=internal-validator $(sort $(wildcard $*.yaml)) >$@
-python-setup: create-venv
- $(OSS_HOME)/venv/bin/python -m pip install ruamel.yaml
-.PHONY: python-setup
-
helm.name.emissary-emissaryns = emissary-ingress
helm.name.emissary-defaultns = emissary-ingress
helm.namespace.emissary-emissaryns = emissary
@@ -442,9 +399,8 @@ $(OSS_HOME)/k8s-config/%/helm-expanded.yaml: \
$(OSS_HOME)/k8s-config/%/output.yaml: \
$(OSS_HOME)/k8s-config/%/helm-expanded.yaml \
$(OSS_HOME)/k8s-config/%/require.yaml \
- $(OSS_HOME)/k8s-config/create_yaml.py \
- python-setup
- . $(OSS_HOME)/venv/bin/activate && $(filter %.py,$^) $(filter %/helm-expanded.yaml,$^) $(filter %/require.yaml,$^) >$@
+ $(tools/filter-yaml)
+ $(tools/filter-yaml) $(filter %/helm-expanded.yaml,$^) $(filter %/require.yaml,$^) >$@
$(OSS_HOME)/manifests/emissary/%.yaml.in: $(OSS_HOME)/k8s-config/%/output.yaml
cp $< $@
$(OSS_HOME)/python/tests/integration/manifests/%.yaml: $(OSS_HOME)/k8s-config/kat-%/output.yaml
@@ -457,8 +413,8 @@ $(OSS_HOME)/python/tests/integration/manifests/rbac_namespace_scope.yaml: $(OSS_
#
# Generate report on dependencies
-$(OSS_HOME)/build-aux/pip-show.txt: sync
- docker exec $$($(BUILDER)) sh -c 'pip freeze --exclude-editable | cut -d= -f1 | xargs pip show' > $@
+$(OSS_HOME)/build-aux/pip-show.txt: docker/builder-base.docker
+ docker run --rm "$$(cat docker/builder-base.docker)" sh -c 'pip freeze --exclude-editable | cut -d= -f1 | xargs pip show' > $@
$(OSS_HOME)/builder/requirements.txt: %.txt: %.in FORCE
$(BUILDER) pip-compile
diff --git a/build-aux/kat.mk b/build-aux/kat.mk
deleted file mode 100644
index 52ce21123c..0000000000
--- a/build-aux/kat.mk
+++ /dev/null
@@ -1,32 +0,0 @@
-# ------------------------------------------------------------------------------
-# KAT docker-compose sandbox
-# ------------------------------------------------------------------------------
-
-tools/sandbox/http_auth/docker-compose.yml tools/sandbox/grpc_auth/docker-compose.yml tools/sandbox/grpc_web/docker-compose.yaml: %: %.in kat-server.docker.push.dev
- sed "s,@KAT_SERVER_DOCKER_IMAGE@,$$(cat kat-server.docker.push.dev),g" < $< > $@
-
-tools/sandbox.http-auth: ## In docker-compose: run Ambassador, an HTTP AuthService, an HTTP backend service, and a TracingService
-tools/sandbox.http-auth: tools/sandbox/http_auth/docker-compose.yml
- @echo " ---> cleaning HTTP auth tools/sandbox"
- @cd tools/sandbox/http_auth && docker-compose stop && docker-compose rm -f
- @echo " ---> starting HTTP auth tools/sandbox"
- @cd tools/sandbox/http_auth && docker-compose up --force-recreate --abort-on-container-exit --build
-.PHONY: tools/sandbox.http-auth
-
-tools/sandbox.grpc-auth: ## In docker-compose: run Ambassador, a gRPC AuthService, an HTTP backend service, and a TracingService
-tools/sandbox.grpc-auth: tools/sandbox/grpc_auth/docker-compose.yml
- @echo " ---> cleaning gRPC auth tools/sandbox"
- @cd tools/sandbox/grpc_auth && docker-compose stop && docker-compose rm -f
- @echo " ---> starting gRPC auth tools/sandbox"
- @cd tools/sandbox/grpc_auth && docker-compose up --force-recreate --abort-on-container-exit --build
-.PHONY: tools/sandbox.grpc-auth
-
-tools/sandbox.web: ## In docker-compose: run Ambassador with gRPC-web enabled, and a gRPC backend service
-tools/sandbox.web: tools/sandbox/grpc_web/docker-compose.yaml
-tools/sandbox.web: tools/sandbox/grpc_web/echo_grpc_web_pb.js tools/sandbox/grpc_web/echo_pb.js
- @echo " ---> cleaning gRPC web tools/sandbox"
- @cd tools/sandbox/grpc_web && npm install && npx webpack
- @cd tools/sandbox/grpc_web && docker-compose stop && docker-compose rm -f
- @echo " ---> starting gRPC web tools/sandbox"
- @cd tools/sandbox/grpc_web && docker-compose up --force-recreate --abort-on-container-exit --build
-.PHONY: tools/sandbox.web
diff --git a/build-aux/main.mk b/build-aux/main.mk
new file mode 100644
index 0000000000..11168cf624
--- /dev/null
+++ b/build-aux/main.mk
@@ -0,0 +1,22 @@
+include build-aux/tools.mk
+
+# For files that should only-maybe update when the rule runs, put ".stamp" on
+# the left-side of the ":", and just go ahead and update it within the rule.
+#
+# ".stamp" should NEVER appear in a dependency list (that is, it
+# should never be on the right-side of the ":"), save for in this rule
+# itself.
+%: %.stamp $(tools/copy-ifchanged)
+ @$(tools/copy-ifchanged) $< $@
+docker/%: docker/.%.stamp $(tools/copy-ifchanged)
+ $(tools/copy-ifchanged) $< $@
+
+_ocibuild-images = base
+_ocibuild-images += kat-client
+_ocibuild-images += kat-server
+$(foreach img,$(_ocibuild-images),docker/.$(img).docker.stamp): docker/.%.docker.stamp: docker/%.img.tar
+ docker load < $<
+ docker inspect $$(bsdtar xfO $< manifest.json|jq -r '.[0].RepoTags[0]') --format='{{.Id}}' > $@
+
+docker/.base.img.tar.stamp: FORCE $(tools/crane) builder/Dockerfile
+ $(tools/crane) pull $(shell sed -n 's,ARG base=,,p' < builder/Dockerfile) $@ || test -e $@
diff --git a/build-aux/tools.mk b/build-aux/tools.mk
index c251e24695..2652969c92 100644
--- a/build-aux/tools.mk
+++ b/build-aux/tools.mk
@@ -47,9 +47,11 @@ $(tools.bindir)/%: $(tools.srcdir)/%.sh
tools/chart-doc-gen = $(tools.bindir)/chart-doc-gen
tools/controller-gen = $(tools.bindir)/controller-gen
tools/conversion-gen = $(tools.bindir)/conversion-gen
+tools/crane = $(tools.bindir)/crane
tools/go-mkopensource = $(tools.bindir)/go-mkopensource
tools/golangci-lint = $(tools.bindir)/golangci-lint
tools/kubestatus = $(tools.bindir)/kubestatus
+tools/ocibuild = $(tools.bindir)/ocibuild
tools/protoc-gen-go = $(tools.bindir)/protoc-gen-go
tools/yq = $(tools.bindir)/yq
$(tools.bindir)/%: $(tools.srcdir)/%/pin.go $(tools.srcdir)/%/go.mod
@@ -67,6 +69,7 @@ $(tools.main-gomod): $(tools.bindir)/%: $(tools.srcdir)/%/pin.go $(OSS_HOME)/go.
# ================
#
tools/dsum = $(tools.bindir)/dsum
+tools/filter-yaml = $(tools.bindir)/filter-yaml
tools/fix-crds = $(tools.bindir)/fix-crds
tools/flock = $(tools.bindir)/flock
tools/gotest2tap = $(tools.bindir)/gotest2tap
diff --git a/builder/Dockerfile b/builder/Dockerfile
index 279e6fc943..9cddbdd30b 100644
--- a/builder/Dockerfile
+++ b/builder/Dockerfile
@@ -41,9 +41,6 @@ WORKDIR /buildroot
ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go/bin:/buildroot/bin
-# Some of the tests need redis-server to run
-RUN apk add redis
-
COPY --from=envoy /usr/local/bin/envoy-static-stripped /usr/local/bin/envoy
ENV KUBECONFIG=/buildroot/kubeconfig.yaml
@@ -88,7 +85,7 @@ RUN --mount=type=cache,target=/root/.cache/go-build \
# The artifact build stage
########################################
-FROM ${builderbase} as artifact-stage
+FROM ${builderbase} as artifacts
WORKDIR /buildroot/ambassador
@@ -120,8 +117,6 @@ ADD manifests/emissary/emissary-crds.yaml manifests/emissary/emissary-crds.yaml
# The optimized images
########################################
-FROM artifact-stage as artifacts
-
FROM ${base} as ambassador
ARG py_version="i-forgot-to-set-build-arg-py-version"
@@ -185,27 +180,3 @@ ENV HOME=/tmp/ambassador
COPY --from=artifacts /bin/busybox /bin/busybox
ENTRYPOINT [ "bash", "/buildroot/ambassador/python/entrypoint.sh" ]
-
-########################################
-
-FROM ${base} as kat-client
-
-COPY --from=artifacts /buildroot/bin/kat-client /usr/bin/kat-client
-RUN mkdir /work && ln -s /usr/bin/kat-client /work/kat_client
-CMD [ "sleep", "3600" ]
-
-########################################
-
-FROM ${base} as kat-server
-
-COPY --from=artifacts /buildroot/bin/kat-server /usr/bin/kat-server
-
-WORKDIR /work
-COPY builder/server.crt server.crt
-COPY builder/server.key server.key
-
-ENV GRPC_VERBOSITY=debug
-ENV GRPC_TRACE=tcp,http,api
-
-EXPOSE 8080
-CMD [ "kat-server" ]
diff --git a/builder/Dockerfile.base b/builder/Dockerfile.base
index 591443fc1f..51e1aedfa8 100644
--- a/builder/Dockerfile.base
+++ b/builder/Dockerfile.base
@@ -31,7 +31,6 @@ ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local
RUN apk --no-cache add \
bash \
- bash-completion \
gcc \
make \
musl-dev \
@@ -45,12 +44,8 @@ RUN apk --no-cache add \
libcap-dev \
libffi-dev \
ncurses \
- nodejs \
- npm \
- openssh-client \
openssl-dev \
py3-pip \
- py3-pip-bash-completion \
python3=~3.8.10 \
python3-dev \
rsync \
@@ -75,24 +70,10 @@ RUN mkdir /tmp/busybox \
# PEP600 compatible wheels for our linux platform, or we start building orjson
# from source using a rust toolchain.
RUN pip3 install -U pip==20.2.4 pip-tools==5.3.1
-RUN npm config set user 0 && \
- npm config set unsafe-perm true && \
- npm install -g rollup@2.9.1 rollup-plugin-terser@5.3.0
RUN curl --fail -L https://dl.google.com/go/go1.17.1.linux-amd64.tar.gz | tar -C /usr/local -xzf -
RUN curl --fail -L https://storage.googleapis.com/kubernetes-release/release/v1.22.2/bin/linux/amd64/kubectl -o /usr/bin/kubectl && \
- chmod a+x /usr/bin/kubectl && \
- kubectl completion bash > /usr/share/bash-completion/completions/kubectl
-RUN curl --fail -L https://raw.githubusercontent.com/ahmetb/kubectx/v0.7.0/kubectx -o /usr/local/bin/kubectx && chmod a+x /usr/local/bin/kubectx && \
- curl --fail -L https://raw.githubusercontent.com/ahmetb/kubectx/v0.7.0/completion/kubectx.bash -o /usr/share/bash-completion/completions/kubectx
-RUN curl --fail -L https://raw.githubusercontent.com/ahmetb/kubectx/v0.7.0/kubens -o /usr/local/bin/kubens && chmod a+x /usr/local/bin/kubens && \
- curl --fail -L https://raw.githubusercontent.com/ahmetb/kubectx/v0.7.0/completion/kubens.bash -o /usr/share/bash-completion/completions/kubens
-RUN curl --fail -L https://github.com/gotestyourself/gotestsum/releases/download/v0.6.0/gotestsum_0.6.0_linux_amd64.tar.gz | tar -C /usr/local/bin -xzf -
-RUN mkdir helmtmp && curl -o helmtmp/helm.tar.gz https://get.helm.sh/helm-v3.4.1-linux-amd64.tar.gz && tar -zxvf helmtmp/helm.tar.gz -C helmtmp && mv helmtmp/linux-amd64/helm /usr/local/bin/helm && rm -rf helmtmp
-RUN curl --fail -L https://app.getambassador.io/download/tel2/linux/amd64/latest/telepresence -o /usr/local/bin/telepresence && chmod a+x /usr/local/bin/telepresence
-RUN curl --fail -L https://raw.githubusercontent.com/jonmosco/kube-ps1/v0.7.0/kube-ps1.sh -o /usr/local/bin/kube-ps1.sh
-RUN curl -o /usr/local/bin/yq --fail -L https://github.com/mikefarah/yq/releases/download/v4.3.1/yq_linux_amd64 && \
- chmod a+x /usr/local/bin/yq
+ chmod a+x /usr/bin/kubectl
# The YAML parser is... special. To get the C version, we need to install Cython and libyaml, then
# build it locally -- just using pip won't work.
diff --git a/builder/builder.mk b/builder/builder.mk
index 5f47d60ea8..0dd4f83b27 100644
--- a/builder/builder.mk
+++ b/builder/builder.mk
@@ -172,16 +172,12 @@ else
# ...and I (lukeshu) couldn't figure out a good way to do it on old (net-tools) GNU/Linux.
endif
-RSYNC_ERR = $(RED)ERROR: please update to a version of rsync with the --info option$(END)
GO_ERR = $(RED)ERROR: please update to go 1.13 or newer$(END)
DOCKER_ERR = $(RED)ERROR: please update to a version of docker built with Go 1.13 or newer$(END)
preflight:
@printf "$(CYN)==> $(GRN)Preflight checks$(END)\n"
- @echo "Checking that 'rsync' is installed and is new enough to support '--info'"
- @$(if $(shell rsync --help 2>/dev/null | grep -F -- --info),,printf '%s\n' $(call quote.shell,$(RSYNC_ERR)))
-
@echo "Checking that 'go' is installed and is 1.13 or later"
@$(if $(call _prelude.go.VERSION.HAVE,1.13),,printf '%s\n' $(call quote.shell,$(GO_ERR)))
@@ -206,21 +202,6 @@ preflight-cluster: $(tools/kubectl)
fi
.PHONY: preflight-cluster
-sync: docker/container.txt $(tools/kubectl)
- @printf "${CYN}==> ${GRN}Syncing sources in to builder container${END}\n"
- @$(foreach MODULE,$(MODULES),$(BUILDER) sync $(MODULE) $(SOURCE_$(MODULE)) &&) true
- @if [ -n "$(DEV_KUBECONFIG)" ] && [ "$(DEV_KUBECONFIG)" != '-skip-for-release-' ]; then \
- $(tools/kubectl) --kubeconfig $(DEV_KUBECONFIG) config view --flatten | docker exec -i $$(cat $<) sh -c "cat > /buildroot/kubeconfig.yaml" ;\
- fi
- @if [ -e ~/.docker/config.json ]; then \
- cat ~/.docker/config.json | docker exec -i $$(cat $<) sh -c "mkdir -p /home/dw/.docker && cat > /home/dw/.docker/config.json" ; \
- fi
- @if [ -n "$(GCLOUD_CONFIG)" ]; then \
- printf "Copying gcloud config to builder container\n"; \
- docker cp $(GCLOUD_CONFIG) $$(cat $<):/home/dw/.config/; \
- fi
-.PHONY: sync
-
builder:
@$(BUILDER) builder
.PHONY: builder
@@ -235,33 +216,17 @@ python/ambassador.version: $(tools/write-ifchanged) FORCE
git rev-parse HEAD; \
} | $(tools/write-ifchanged) $@
-compile: sync
- @$(BUILDER) compile
-.PHONY: compile
-
-# For files that should only-maybe update when the rule runs, put ".stamp" on
-# the left-side of the ":", and just go ahead and update it within the rule.
-#
-# ".stamp" should NEVER appear in a dependency list (that is, it
-# should never be on the right-side of the ":"), save for in this rule
-# itself.
-%: %.stamp $(tools/copy-ifchanged)
- @$(tools/copy-ifchanged) $< $@
-
# Give Make a hint about which pattern rules to apply. Honestly, I'm
# not sure why Make isn't figuring it out on its own, but it isn't.
_images = builder-base base-envoy $(LCNAME) kat-client kat-server
$(foreach i,$(_images), docker/$i.docker.tag.local ): docker/%.docker.tag.local : docker/%.docker
$(foreach i,$(_images), docker/$i.docker.tag.remote ): docker/%.docker.tag.remote: docker/%.docker
-docker/builder-base.docker.stamp: FORCE preflight
+docker/.builder-base.docker.stamp: FORCE preflight
@printf "${CYN}==> ${GRN}Bootstrapping builder base image${END}\n"
@$(BUILDER) build-builder-base >$@
-docker/container.txt.stamp: %/container.txt.stamp: %/builder-base.docker.tag.local %/base-envoy.docker.tag.local FORCE
- @printf "${CYN}==> ${GRN}Bootstrapping builder container${END}\n"
- @($(BOOTSTRAP_EXTRAS) $(BUILDER) bootstrap > $@)
-docker/base-envoy.docker.stamp: FORCE
+docker/.base-envoy.docker.stamp: FORCE
@set -e; { \
if docker image inspect $(ENVOY_DOCKER_TAG) --format='{{ .Id }}' >$@ 2>/dev/null; then \
printf "${CYN}==> ${GRN}Base Envoy image is already pulled${END}\n"; \
@@ -273,7 +238,7 @@ docker/base-envoy.docker.stamp: FORCE
fi; \
echo $(ENVOY_DOCKER_TAG) >$@; \
}
-docker/$(LCNAME).docker.stamp: %/$(LCNAME).docker.stamp: %/base-envoy.docker.tag.local %/builder-base.docker python/ambassador.version $(BUILDER_HOME)/Dockerfile $(OSS_HOME)/build-aux/py-version.txt $(tools/dsum) FORCE
+docker/.$(LCNAME).docker.stamp: %/.$(LCNAME).docker.stamp: %/base-envoy.docker.tag.local %/builder-base.docker python/ambassador.version $(BUILDER_HOME)/Dockerfile $(OSS_HOME)/build-aux/py-version.txt $(tools/dsum) FORCE
@printf "${CYN}==> ${GRN}Building image ${BLU}$(LCNAME)${END}\n"
@printf " ${BLU}envoy=$$(cat $*/base-envoy.docker)${END}\n"
@printf " ${BLU}builderbase=$$(cat $*/builder-base.docker)${END}\n"
@@ -282,26 +247,6 @@ docker/$(LCNAME).docker.stamp: %/$(LCNAME).docker.stamp: %/base-envoy.docker.tag
--build-arg=envoy="$$(cat $*/base-envoy.docker)" \
--build-arg=builderbase="$$(cat $*/builder-base.docker)" \
--build-arg=py_version="$$(cat build-aux/py-version.txt)" \
- --target=ambassador \
- --iidfile=$@; }
-
-docker/kat-client.docker.stamp: %/kat-client.docker.stamp: %/base-envoy.docker.tag.local %/builder-base.docker $(BUILDER_HOME)/Dockerfile $(OSS_HOME)/build-aux/py-version.txt $(tools/dsum) FORCE
- @printf "${CYN}==> ${GRN}Building image ${BLU}kat-client${END}\n"
- { $(tools/dsum) 'kat-client build' 3s \
- docker build -f ${BUILDER_HOME}/Dockerfile . \
- --build-arg=envoy="$$(cat $*/base-envoy.docker)" \
- --build-arg=builderbase="$$(cat $*/builder-base.docker)" \
- --build-arg=py_version="$$(cat build-aux/py-version.txt)" \
- --target=kat-client \
- --iidfile=$@; }
-docker/kat-server.docker.stamp: %/kat-server.docker.stamp: %/base-envoy.docker.tag.local %/builder-base.docker $(BUILDER_HOME)/Dockerfile $(OSS_HOME)/build-aux/py-version.txt $(tools/dsum) FORCE
- @printf "${CYN}==> ${GRN}Building image ${BLU}kat-server${END}\n"
- { $(tools/dsum) 'kat-server build' 3s \
- docker build -f ${BUILDER_HOME}/Dockerfile . \
- --build-arg=envoy="$$(cat $*/base-envoy.docker)" \
- --build-arg=builderbase="$$(cat $*/builder-base.docker)" \
- --build-arg=py_version="$$(cat build-aux/py-version.txt)" \
- --target=kat-server \
--iidfile=$@; }
REPO=$(BUILDER_NAME)
@@ -398,8 +343,6 @@ docker/run/%: docker/builder-base.docker
-e INTERACTIVE_GROUP=$$(id -g -n) \
-e PYTEST_ARGS="$$PYTEST_ARGS" \
-e AMBASSADOR_DOCKER_IMAGE="$$AMBASSADOR_DOCKER_IMAGE" \
- -e KAT_CLIENT_DOCKER_IMAGE="$$KAT_CLIENT_DOCKER_IMAGE" \
- -e KAT_SERVER_DOCKER_IMAGE="$$KAT_SERVER_DOCKER_IMAGE" \
-e DEV_KUBECONFIG="$$DEV_KUBECONFIG" \
-v /etc/resolv.conf:/etc/resolv.conf \
-v /var/run/docker.sock:/var/run/docker.sock \
@@ -418,47 +361,52 @@ docker/run/shell:
setup-envoy: extract-bin-envoy
-pytest: docker/$(LCNAME).docker.push.remote
-pytest: docker/kat-client.docker.push.remote
-pytest: docker/kat-server.docker.push.remote
+pytest: push-pytest-images
pytest: $(tools/kubestatus)
pytest: $(tools/kubectl)
- @$(MAKE) setup-diagd
- @$(MAKE) setup-envoy
- @$(MAKE) proxy
+pytest: $(OSS_HOME)/venv
+pytest: setup-envoy
+pytest: proxy
@printf "$(CYN)==> $(GRN)Running $(BLU)py$(GRN) tests$(END)\n"
@echo "AMBASSADOR_DOCKER_IMAGE=$$AMBASSADOR_DOCKER_IMAGE"
- @echo "KAT_CLIENT_DOCKER_IMAGE=$$KAT_CLIENT_DOCKER_IMAGE"
- @echo "KAT_SERVER_DOCKER_IMAGE=$$KAT_SERVER_DOCKER_IMAGE"
@echo "DEV_KUBECONFIG=$$DEV_KUBECONFIG"
@echo "KAT_RUN_MODE=$$KAT_RUN_MODE"
@echo "PYTEST_ARGS=$$PYTEST_ARGS"
- . $(OSS_HOME)/venv/bin/activate; \
- $(OSS_HOME)/builder/builder.sh pytest-local
+ mkdir -p $(or $(TEST_XML_DIR),/tmp/test-data)
+ set -e; { \
+ . $(OSS_HOME)/venv/bin/activate; \
+ export SOURCE_ROOT=$(CURDIR); \
+ export ENVOY_PATH=$(CURDIR)/bin/envoy; \
+ export KUBESTATUS_PATH=$(CURDIR)/tools/bin/kubestatus; \
+ pytest --cov-branch --cov=ambassador --cov-report html:/tmp/cov_html --junitxml=$(or $(TEST_XML_DIR),/tmp/test-data)/pytest.xml --tb=short -rP $(PYTEST_ARGS); \
+ }
.PHONY: pytest
-pytest-unit:
+pytest-unit: setup-envoy $(OSS_HOME)/venv
@printf "$(CYN)==> $(GRN)Running $(BLU)py$(GRN) unit tests$(END)\n"
- @$(MAKE) setup-envoy
- @$(MAKE) setup-diagd
- . $(OSS_HOME)/venv/bin/activate; \
- PYTEST_ARGS="$$PYTEST_ARGS python/tests/unit" $(OSS_HOME)/builder/builder.sh pytest-local-unit
+ mkdir -p $(or $(TEST_XML_DIR),/tmp/test-data)
+ set -e; { \
+ . $(OSS_HOME)/venv/bin/activate; \
+ export SOURCE_ROOT=$(CURDIR); \
+ export ENVOY_PATH=$(CURDIR)/bin/envoy; \
+ pytest --cov-branch --cov=ambassador --cov-report html:/tmp/cov_html --junitxml=$(or $(TEST_XML_DIR),/tmp/test-data)/pytest.xml --tb=short -rP $(PYTEST_ARGS) python/tests/unit; \
+ }
.PHONY: pytest-unit
-pytest-integration:
+pytest-integration: push-pytest-images
@printf "$(CYN)==> $(GRN)Running $(BLU)py$(GRN) integration tests$(END)\n"
$(MAKE) pytest PYTEST_ARGS="$$PYTEST_ARGS python/tests/integration"
.PHONY: pytest-integration
-pytest-kat-local:
+pytest-kat-local: push-pytest-images
$(MAKE) pytest PYTEST_ARGS="$$PYTEST_ARGS python/tests/kat"
-pytest-kat-envoy3: # doing this all at once is too much for CI...
+pytest-kat-envoy3: push-pytest-images # doing this all at once is too much for CI...
$(MAKE) pytest KAT_RUN_MODE=envoy PYTEST_ARGS="$$PYTEST_ARGS python/tests/kat"
-pytest-kat-envoy3-%: # ... so we have a separate rule to run things split up
+pytest-kat-envoy3-%: push-pytest-images # ... so we have a separate rule to run things split up
$(MAKE) pytest KAT_RUN_MODE=envoy PYTEST_ARGS="$$PYTEST_ARGS --letter-range $* python/tests/kat"
-pytest-kat-envoy2: # doing this all at once is too much for CI...
+pytest-kat-envoy2: push-pytest-images # doing this all at once is too much for CI...
$(MAKE) pytest KAT_RUN_MODE=envoy AMBASSADOR_ENVOY_API_VERSION=V2 PYTEST_ARGS="$$PYTEST_ARGS python/tests/kat"
-pytest-kat-envoy2-%: # ... so we have a separate rule to run things split up
+pytest-kat-envoy2-%: push-pytest-images # ... so we have a separate rule to run things split up
$(MAKE) pytest KAT_RUN_MODE=envoy AMBASSADOR_ENVOY_API_VERSION=V2 PYTEST_ARGS="$$PYTEST_ARGS --letter-range $* python/tests/kat"
.PHONY: pytest-kat-%
@@ -472,52 +420,15 @@ extract-bin-envoy: docker/base-envoy.docker.tag.local
@chmod +x $(OSS_HOME)/bin/envoy
.PHONY: extract-bin-envoy
-pytest-builder: test-ready
- $(MAKE) pytest-builder-only
-.PHONY: pytest-builder
-
-pytest-envoy3-builder:
- $(MAKE) pytest-builder KAT_RUN_MODE=envoy
-.PHONY: pytest-envoy3-builder
-
-pytest-envoy2-builder:
- $(MAKE) pytest-builder KAT_RUN_MODE=envoy AMBASSADOR_ENVOY_API_VERSION=V2
-.PHONY: pytest-envoy2-builder
-
-pytest-builder-only: sync preflight-cluster | docker/$(LCNAME).docker.push.remote docker/kat-client.docker.push.remote docker/kat-server.docker.push.remote
- @printf "$(CYN)==> $(GRN)Running $(BLU)py$(GRN) tests in builder shell$(END)\n"
- docker exec \
- -e AMBASSADOR_DOCKER_IMAGE=$$(sed -n 2p docker/$(LCNAME).docker.push.remote) \
- -e KAT_CLIENT_DOCKER_IMAGE=$$(sed -n 2p docker/kat-client.docker.push.remote) \
- -e KAT_SERVER_DOCKER_IMAGE=$$(sed -n 2p docker/kat-server.docker.push.remote) \
- -e KAT_IMAGE_PULL_POLICY=Always \
- -e DOCKER_NETWORK=$(DOCKER_NETWORK) \
- -e KAT_REQ_LIMIT \
- -e KAT_RUN_MODE \
- -e KAT_VERBOSE \
- -e PYTEST_ARGS \
- -e DEV_USE_IMAGEPULLSECRET \
- -e DEV_REGISTRY \
- -e DOCKER_BUILD_USERNAME \
- -e DOCKER_BUILD_PASSWORD \
- -e AMBASSADOR_ENVOY_API_VERSION \
- -e AMBASSADOR_FAST_RECONFIGURE \
- -e AWS_SECRET_ACCESS_KEY \
- -e AWS_ACCESS_KEY_ID \
- -e AWS_SESSION_TOKEN \
- -it $(shell $(BUILDER)) /buildroot/builder.sh pytest-internal ; test_exit=$$? ; \
- [ -n "$(TEST_XML_DIR)" ] && docker cp $(shell $(BUILDER)):/tmp/test-data/pytest.xml $(TEST_XML_DIR) ; exit $$test_exit
-.PHONY: pytest-builder-only
-
pytest-gold:
sh $(COPY_GOLD) $(PYTEST_GOLD_DIR)
-mypy-server-stop: setup-diagd
+mypy-server-stop: $(OSS_HOME)/venv
@printf "${CYN}==> ${GRN}Stopping mypy server${END}"
{ . $(OSS_HOME)/venv/bin/activate && dmypy stop; }
.PHONY: mypy-server-stop
-mypy-server: setup-diagd
+mypy-server: $(OSS_HOME)/venv
{ . $(OSS_HOME)/venv/bin/activate && \
if ! dmypy status >/dev/null; then \
dmypy start -- --use-fine-grained-cache --follow-imports=skip --ignore-missing-imports ;\
@@ -532,48 +443,35 @@ mypy: mypy-server
{ . $(OSS_HOME)/venv/bin/activate && time dmypy check python; }
.PHONY: mypy
-GOTEST_PKGS = github.com/datawire/ambassador/v2/...
-GOTEST_MODDIRS = $(OSS_HOME)
-export GOTEST_PKGS
-export GOTEST_MODDIRS
-
-GOTEST_ARGS ?= -race -count=1
-export GOTEST_ARGS
-
-create-venv:
- [[ -d $(OSS_HOME)/venv ]] || python3 -m venv $(OSS_HOME)/venv
-.PHONY: create-venv
-
# If we're setting up within Alpine linux, make sure to pin pip and pip-tools
# to something that is still PEP517 compatible. This allows us to set _manylinux.py
# and convince pip to install prebuilt wheels. We do this because there's no good
# rust toolchain to build orjson within Alpine itself.
-setup-venv:
- @set -e; { \
- if [ -f /etc/issue ] && grep "Alpine Linux" < /etc/issue ; then \
- pip3 install -U pip==20.2.4 pip-tools==5.3.1; \
- echo 'manylinux1_compatible = True' > venv/lib/python3.8/site-packages/_manylinux.py; \
- pip install orjson==3.3.1; \
- rm -f venv/lib/python3.8/site-packages/_manylinux.py; \
- else \
- pip install orjson==3.6.0; \
- fi; \
- pip install -r $(OSS_HOME)/builder/requirements.txt; \
- pip install -r $(OSS_HOME)/builder/requirements-dev.txt; \
- pip install -e $(OSS_HOME)/python; \
+$(OSS_HOME)/venv: builder/requirements.txt builder/requirements-dev.txt
+ rm -rf $@
+ python3 -m venv $@
+ { \
+ if grep "Alpine Linux" /etc/issue &>/dev/null; then \
+ $@/bin/pip3 install -U pip==20.2.4 pip-tools==5.3.1; \
+ echo 'manylinux1_compatible = True' > $@/lib/python3.8/site-packages/_manylinux.py; \
+ $@/bin/pip3 install orjson==3.3.1; \
+ rm -f venv/lib/python3.8/site-packages/_manylinux.py; \
+ else \
+ $@/bin/pip3 install orjson==3.6.0; \
+ fi; \
}
-.PHONY: setup-orjson
-
-setup-diagd: create-venv
- . $(OSS_HOME)/venv/bin/activate && $(MAKE) setup-venv
-.PHONY: setup-diagd
+ $@/bin/pip3 install -r builder/requirements.txt
+ $@/bin/pip3 install -r builder/requirements-dev.txt
+ $@/bin/pip3 install -e $(OSS_HOME)/python
-gotest: setup-diagd $(tools/kubectl)
+GOTEST_ARGS ?= -race -count=1 -timeout 30m
+GOTEST_PKGS ?= ./...
+gotest: $(OSS_HOME)/venv $(tools/kubectl)
@printf "$(CYN)==> $(GRN)Running $(BLU)go$(GRN) tests$(END)\n"
{ . $(OSS_HOME)/venv/bin/activate && \
export PATH=$(tools.bindir):$${PATH} && \
export EDGE_STACK=$(GOTEST_AES_ENABLED) && \
- $(OSS_HOME)/builder/builder.sh gotest-local; }
+ go test $(GOTEST_ARGS) $(GOTEST_PKGS); }
.PHONY: gotest
# Ingress v1 conformance tests, using KIND and the Ingress Conformance Tests suite.
@@ -660,11 +558,6 @@ ingresstest: $(tools/kubectl) | docker/$(LCNAME).docker.push.remote
test: ingresstest gotest pytest
.PHONY: test
-shell: docker/container.txt
- @printf "$(CYN)==> $(GRN)Launching interactive shell...$(END)\n"
- @$(BUILDER) shell
-.PHONY: shell
-
AMB_IMAGE_RC=$(RELEASE_REGISTRY)/$(REPO):$(patsubst v%,%,$(VERSION))
AMB_IMAGE_RELEASE=$(RELEASE_REGISTRY)/$(REPO):$(patsubst v%,%,$(VERSION))
@@ -888,18 +781,12 @@ clobber:
AMBASSADOR_DOCKER_IMAGE = $(shell sed -n 2p docker/$(LCNAME).docker.push.remote 2>/dev/null)
export AMBASSADOR_DOCKER_IMAGE
-KAT_CLIENT_DOCKER_IMAGE = $(shell sed -n 2p docker/kat-client.docker.push.remote 2>/dev/null)
-export KAT_CLIENT_DOCKER_IMAGE
-KAT_SERVER_DOCKER_IMAGE = $(shell sed -n 2p docker/kat-server.docker.push.remote 2>/dev/null)
-export KAT_SERVER_DOCKER_IMAGE
_user-vars = BUILDER_NAME
_user-vars += DEV_KUBECONFIG
_user-vars += DEV_REGISTRY
_user-vars += RELEASE_REGISTRY
_user-vars += AMBASSADOR_DOCKER_IMAGE
-_user-vars += KAT_CLIENT_DOCKER_IMAGE
-_user-vars += KAT_SERVER_DOCKER_IMAGE
env:
@printf '$(BLD)%s$(END)=$(BLU)%s$(END)\n' $(foreach v,$(_user-vars), $v $(call quote.shell,$(call quote.shell,$($v))) )
.PHONY: env
@@ -971,8 +858,7 @@ $(BLU)$$DOCKER_BUILD_USERNAME$(END), and $(BLU)$$DOCKER_BUILD_PASSWORD$(END).
By default, the base builder image is (as an optimization) pulled from
$(BLU)$$BASE_REGISTRY$(END) instead of being built locally; where $(BLD)$$BASE_REGISTRY$(END)
-defaults to $(BLD)$$DEV_REGISTRY$(END) or else $(BLD)$${BUILDER_NAME}.local$(END). If that pull
-fails (as it will if trying to pull from a $(BLD).local$(END) registry, or if the
+defaults to $(BLD)docker.io/emissaryingress$(END). If that pull fails, (as it will if the
image does not yet exist), then it falls back to building the base image
locally. If $(BLD)$$BASE_REGISTRY$(END) is equal to $(BLD)$$DEV_REGISTRY$(END), then it will
proceed to push the built image back to the $(BLD)$$BASE_REGISTRY$(END).
@@ -991,12 +877,8 @@ define _help.targets
$(BLD)$(MAKE) $(BLU)preflight$(END) -- checks dependencies of this makefile.
- $(BLD)$(MAKE) $(BLU)sync$(END) -- syncs source code into the build container.
-
$(BLD)$(MAKE) $(BLU)version$(END) -- display source code version.
- $(BLD)$(MAKE) $(BLU)compile$(END) -- syncs and compiles the source code in the build container.
-
$(BLD)$(MAKE) $(BLU)images$(END) -- creates images from the build container.
$(BLD)$(MAKE) $(BLU)push$(END) -- pushes images to $(BLD)$$DEV_REGISTRY$(END). ($(DEV_REGISTRY))
@@ -1033,12 +915,6 @@ define _help.targets
$(BLD)DO NOT$(END) run $(BLD)$(MAKE) $(BLU)pytest-gold$(END) if you have failing tests.
- $(BLD)$(MAKE) $(BLU)shell$(END) -- starts a shell in the build container
-
- The current commit must be tagged for this to work, and your tree must be clean.
- Additionally, the tag must be of the form 'vX.Y.Z-rc.N'. You must also have previously
- built an RC for the same tag using $(BLD)release/bits$(END).
-
$(BLD)$(MAKE) $(BLU)release/promote-oss/to-ga$(END) -- promote a release candidate to general availability
The current commit must be tagged for this to work, and your tree must be clean.
diff --git a/builder/builder.sh b/builder/builder.sh
index 6fa894b2f5..2a8f9bd915 100755
--- a/builder/builder.sh
+++ b/builder/builder.sh
@@ -1,10 +1,5 @@
#!/usr/bin/env bash
-shopt -s expand_aliases
-
-alias echo_on="{ set -x; }"
-alias echo_off="{ set +x; } 2>/dev/null"
-
# Choose colors carefully. If they don't work on both a black
# background and a white background, pick other colors (so white,
# yellow, and black are poor choices).
@@ -23,10 +18,6 @@ while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symli
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
DIR="$( cd -P "$( dirname "$SOURCE" )" >/dev/null 2>&1 && pwd )"
-TEST_DATA_DIR=/tmp/test-data/
-if [[ -n "${TEST_XML_DIR}" ]] ; then
- TEST_DATA_DIR=${TEST_XML_DIR}
-fi
dsum() {
local exe=${DIR}/../tools/bin/dsum
@@ -36,25 +27,6 @@ dsum() {
"$exe" "$@"
}
-now=$(date +"%H%M%S")
-
-# container name of the builder
-BUILDER_CONT_NAME=${BUILDER_CONT_NAME:-"bld-${BUILDER_NAME}-${now}"}
-
-# command for running a container (ie, "docker run")
-BUILDER_DOCKER_RUN=${BUILDER_DOCKER_RUN:-docker run}
-
-# the name of the Docker network
-# note: this is necessary for connecting the builder to a local k3d/microk8s/kind network (ie, for running tests)
-BUILDER_DOCKER_NETWORK=${BUILDER_DOCKER_NETWORK:-${BUILDER_NAME}}
-
-# Do this with `eval` so that we properly interpret quotes.
-eval "pytest_args=(${PYTEST_ARGS:-})"
-
-msg() {
- printf "${CYN}==> ${GRN}%s${END}\n" "$*" >&2
-}
-
msg2() {
printf "${BLU} -> ${GRN}%s${END}\n" "$*" >&2
}
@@ -64,49 +36,6 @@ panic() {
exit 1
}
-builder() {
- if ! [ -e docker/builder-base.docker ]; then
- panic "This should not happen: 'docker/builder-base.docker' does not exist"
- fi
- if ! [ -e docker/base-envoy.docker ]; then
- panic "This should not happen: 'docker/base-envoy.docker' does not exist"
- fi
- local builder_base_image envoy_base_image
- builder_base_image=$(cat docker/builder-base.docker)
- envoy_base_image=$(cat docker/base-envoy.docker)
- docker ps --quiet \
- --filter=label=builder \
- --filter=label="$BUILDER_NAME" \
- --filter=label=builderbase="$builder_base_image" \
- --filter=label=envoybase="$envoy_base_image"
-}
-builder_network() { docker network ls -q -f name="${BUILDER_DOCKER_NETWORK}"; }
-
-builder_volume() { docker volume ls -q -f label=builder; }
-
-declare -a dsynced
-
-dsync() {
- msg2 "Synchronizing... $*"
- TIMEFORMAT=" (sync took %1R seconds)"
- time IFS='|' read -ra dsynced <<<"$(rsync --info=name -aO --blocking-io -e 'docker exec -i' $@ 2> >(fgrep -v 'rsync: failed to set permissions on' >&2) | tr '\n' '|')"
-}
-
-dcopy() {
- msg2 "Copying... $*"
- local TIMEFORMAT=" (copy took %1R seconds)"
- time docker cp $@
-}
-
-dexec() {
- if [[ -t 0 ]]; then
- flags=-it
- else
- flags=-i
- fi
- docker exec ${flags} $(builder) "$@"
-}
-
# Usage: build_builder_base [--stage1-only]
# Effects:
# 1. Set the `builder_base_image` variable in the parent scope
@@ -175,13 +104,11 @@ print("stage2_tag=%s" % stage2)
local stage1_tag stage2_tag
eval "$(cd "$DIR" && python -c "$builder_base_tag_py")" # sets 'stage1_tag' and 'stage2_tag'
- local BASE_REGISTRY="${BASE_REGISTRY:-${DEV_REGISTRY:-${BUILDER_NAME}.local}}"
-
local name1="${BASE_REGISTRY}/builder-base:stage1-${stage1_tag}"
local name2="${BASE_REGISTRY}/builder-base:stage2-${stage2_tag}"
msg2 "Using stage-1 base ${BLU}${name1}${GRN}"
- if ! (docker image inspect "$name1" || docker pull "$name2") &>/dev/null; then # skip building if the "$name1" already exists
+ if ! (docker image inspect "$name1" || docker pull "$name1") &>/dev/null; then # skip building if the "$name1" already exists
dsum 'stage-1 build' 3s \
docker build -f "${DIR}/Dockerfile.base" -t "${name1}" --target builderbase-stage1 "${DIR}"
if [[ "$BASE_REGISTRY" == "$DEV_REGISTRY" ]]; then
@@ -209,250 +136,9 @@ print("stage2_tag=%s" % stage2)
builder_base_image="$name2" # not local
}
-bootstrap() {
- if [ -z "$(builder_volume)" ] ; then
- docker volume create --label builder
- msg2 "Created docker volume ${BLU}$(builder_volume)${GRN} for caching"
- fi
-
- if [ -z "$(builder_network)" ]; then
- msg2 "Creating docker network ${BLU}${BUILDER_DOCKER_NETWORK}${GRN}"
- docker network create "${BUILDER_DOCKER_NETWORK}" > /dev/null
- else
- msg2 "Connecting to existing network ${BLU}${BUILDER_DOCKER_NETWORK}${GRN}"
- fi
-
- if [ -z "$(builder)" ] ; then
- if ! [ -e docker/builder-base.docker ]; then
- panic "This should not happen: 'docker/builder-base.docker' does not exist"
- fi
- if ! [ -e docker/base-envoy.docker ]; then
- panic "This should not happen: 'docker/base-envoy.docker' does not exist"
- fi
- local builder_base_image envoy_base_image
- builder_base_image=$(cat docker/builder-base.docker)
- envoy_base_image=$(cat docker/base-envoy.docker)
- msg2 'Bootstrapping build image'
- dsum 'builder bootstrap' 3s \
- docker build \
- --build-arg=envoy="${envoy_base_image}" \
- --build-arg=builderbase="${builder_base_image}" \
- --target=builder \
- ${DIR} -t ${BUILDER_NAME}.local/builder
- if stat --version | grep -q GNU ; then
- DOCKER_GID=$(stat -c "%g" /var/run/docker.sock)
- else
- DOCKER_GID=$(stat -f "%g" /var/run/docker.sock)
- fi
- if [ -z "${DOCKER_GID}" ]; then
- panic "Unable to determine docker group-id"
- fi
-
- msg2 'Starting build container...'
-
- echo_on
- $BUILDER_DOCKER_RUN \
- --name="$BUILDER_CONT_NAME" \
- --network="${BUILDER_DOCKER_NETWORK}" \
- --network-alias="builder" \
- --group-add="${DOCKER_GID}" \
- --detach \
- --rm \
- --volume=/var/run/docker.sock:/var/run/docker.sock \
- --volume="$(builder_volume):/home/dw" \
- ${BUILDER_MOUNTS} \
- --cap-add=NET_ADMIN \
- --label=builder \
- --label="${BUILDER_NAME}" \
- --label=builderbase="$builder_base_image" \
- --label=envoybase="$envoy_base_image" \
- ${BUILDER_PORTMAPS} \
- ${BUILDER_DOCKER_EXTRA} \
- --env=BUILDER_NAME="${BUILDER_NAME}" \
- --env=GOPRIVATE="${GOPRIVATE}" \
- --env=AWS_SECRET_ACCESS_KEY \
- --env=AWS_ACCESS_KEY_ID \
- --env=AWS_SESSION_TOKEN \
- --init \
- --entrypoint=tail ${BUILDER_NAME}.local/builder -f /dev/null > /dev/null
- echo_off
-
- msg2 "Started build container ${BLU}$(builder)${GRN}"
- fi
-
- dcopy ${DIR}/builder.sh $(builder):/buildroot
- dcopy ${DIR}/builder_bash_rc $(builder):/home/dw/.bashrc
-
- # If we've been asked to muck with gitconfig, do it.
- if [ -n "$SYNC_GITCONFIG" ]; then
- dsync "$SYNC_GITCONFIG" $(builder):/home/dw/.gitconfig
- fi
-}
-
-sync() {
- name=$1
- sourcedir=$2
- container=$3
-
- real=$(cd ${sourcedir}; pwd)
-
- make python/ambassador.version
-
- dexec mkdir -p /buildroot/${name}
- if [[ $name == apro ]]; then
- # Don't let 'deleting ambassador' cause the sync to be marked dirty
- dexec sh -c 'rm -rf apro/ambassador'
- fi
- dsync $DSYNC_EXTRA --exclude-from=${DIR}/sync-excludes.txt --delete ${real}/ ${container}:/buildroot/${name}
- summarize-sync $name "${dsynced[@]}"
- if [[ $name == apro ]]; then
- # BusyBox `ln` 1.30.1's `-T` flag is broken, and doesn't have a `-t` flag.
- dexec sh -c 'if ! test -L apro/ambassador; then rm -rf apro/ambassador && ln -s ../ambassador apro; fi'
- fi
-}
-
-summarize-sync() {
- name=$1
- shift
- lines=("$@")
- local pydirty=false
- local godirty=false
- for line in "${lines[@]}"; do
- if [[ $line != *.version ]] && ! $pydirty; then
- dexec touch ${name}.dirty image.dirty
- pydirty=true
- fi
- if [[ $line = *.go ]] && ! $godirty; then
- dexec touch go.dirty
- godirty=true
- fi
- if $pydirty && $godirty; then
- break
- fi
- done
- printf " ${GRN}Synced ${#lines[@]} ${BLU}${name}${GRN} source files${END}\n"
- PARTIAL="yes"
- for i in {0..9}; do
- if [ "$i" = "${#lines[@]}" ]; then
- PARTIAL=""
- break
- fi
- line="${lines[$i]}"
- printf " ${CYN}%s${END}\n" "$line"
- done
- if [ -n "${PARTIAL}" ]; then
- printf " ${CYN}...${END}\n"
- fi
-}
-
-clean() {
- local cid
- # This command is similar to
- #
- # builder | while read -r cid; do
- #
- # except that this command does *not* filter based on the
- # `builderbase=` and `envoybase=` labels, because we want to
- # garbage-collect old containers that were orphaned when either
- # the builderbase or the envoybase image changed.
- docker ps --quiet \
- --filter=label=builder \
- --filter=label="$BUILDER_NAME" \
- | while read -r cid; do
- printf "${GRN}Killing build container ${BLU}${cid}${END}\n"
- docker kill ${cid} > /dev/null 2>&1
- docker wait ${cid} > /dev/null 2>&1 || true
- done
- local nid
- nid=$(builder_network)
- if [ -n "${nid}" ] ; then
- printf "${GRN}Removing docker network ${BLU}${BUILDER_DOCKER_NETWORK} (${nid})${END}\n"
- # This will fail if the network has some other endpoints alive: silence any errors
- docker network rm ${nid} 2>&1 >/dev/null || true
- fi
-}
-
-find-modules () {
- find /buildroot -type d -mindepth 1 -maxdepth 1 \! -name bin | sort
-}
-
-cmd="${1:-builder}"
+cmd="${1:-help}"
case "${cmd}" in
- clean)
- clean
- ;;
- clobber)
- clean
- vid=$(builder_volume)
- if [ -n "${vid}" ] ; then
- printf "${GRN}Killing cache volume ${BLU}${vid}${END}\n"
- if ! docker volume rm ${vid} > /dev/null 2>&1 ; then \
- printf "${RED}Could not kill cache volume; are other builders still running?${END}\n"
- fi
- fi
- ;;
- bootstrap)
- bootstrap >&2
- echo $(builder)
- ;;
- builder)
- echo $(builder)
- ;;
- sync)
- shift
- sync $1 $2 $(builder)
- ;;
- compile)
- shift
- dexec /buildroot/builder.sh compile-internal
- ;;
- compile-internal)
- # This runs inside the builder image
- if [[ $(find-modules) != /buildroot/ambassador* ]]; then
- echo "Error: ambassador must be the first module to build things correctly"
- echo "Modules are: $(find-modules)"
- exit 1
- fi
-
- for MODDIR in $(find-modules); do
- module=$(basename ${MODDIR})
-
- if [ -e ${module}.dirty ] || ([ "$module" != ambassador ] && [ -e go.dirty ]) ; then
- if [ -e "${MODDIR}/go.mod" ]; then
- printf "${CYN}==> ${GRN}Building ${BLU}${module}${GRN} go code${END}\n"
- echo_on
- mkdir -p /buildroot/bin
- TIMEFORMAT=" (go build took %1R seconds)"
- (cd ${MODDIR} && time go build -trimpath -o /buildroot/bin ./cmd/...) || exit 1
- TIMEFORMAT=" (${MODDIR}/post-compile took %1R seconds)"
- if [ -e ${MODDIR}/post-compile.sh ]; then (cd ${MODDIR} && time bash post-compile.sh); fi
- unset TIMEFORMAT
- echo_off
- fi
- fi
-
- if [ -e ${module}.dirty ]; then
- if [ -e "${MODDIR}/python" ]; then
- if ! [ -e ${MODDIR}/python/*.egg-info ]; then
- printf "${CYN}==> ${GRN}Setting up ${BLU}${module}${GRN} python code${END}\n"
- echo_on
- TIMEFORMAT=" (pip install took %1R seconds)"
- time sudo pip install --no-deps -e ${MODDIR}/python || exit 1
- unset TIMEFORMAT
- echo_off
- fi
- chmod a+x ${MODDIR}/python/*.py
- fi
-
- rm ${module}.dirty
- else
- printf "${CYN}==> ${GRN}Already built ${BLU}${module}${GRN}${END}\n"
- fi
- done
- rm -f go.dirty # Do this after _all_ the Go code is built
- ;;
-
pip-compile)
build_builder_base --stage1-only
printf "${GRN}Running pip-compile to update ${BLU}requirements.txt${END}\n"
@@ -462,129 +148,12 @@ case "${cmd}" in
mv -f "$DIR/requirements.txt.tmp" "$DIR/requirements.txt"
;;
- pytest-local)
- fail=""
- mkdir -p ${TEST_DATA_DIR}
-
- if [ -z "$SOURCE_ROOT" ] ; then
- export SOURCE_ROOT="$PWD"
- fi
-
- if [ -z "$MODDIR" ] ; then
- export MODDIR="$PWD"
- fi
-
- if [ -z "$ENVOY_PATH" ] ; then
- export ENVOY_PATH="${MODDIR}/bin/envoy"
- fi
- if [ ! -f "$ENVOY_PATH" ] ; then
- echo "Envoy not found at ENVOY_PATH=$ENVOY_PATH"
- exit 1
- fi
-
- if [ -z "$KUBESTATUS_PATH" ] ; then
- export KUBESTATUS_PATH="${MODDIR}/tools/bin/kubestatus"
- fi
- if [ ! -f "$KUBESTATUS_PATH" ] ; then
- echo "Kubestatus not found at $KUBESTATUS_PATH"
- exit 1
- fi
-
- echo "$0: EDGE_STACK=$EDGE_STACK"
- echo "$0: SOURCE_ROOT=$SOURCE_ROOT"
- echo "$0: MODDIR=$MODDIR"
- echo "$0: ENVOY_PATH=$ENVOY_PATH"
- echo "$0: KUBESTATUS_PATH=$KUBESTATUS_PATH"
- if ! (cd ${MODDIR} && pytest --cov-branch --cov=ambassador --cov-report html:/tmp/cov_html --junitxml=${TEST_DATA_DIR}/pytest.xml --tb=short -rP "${pytest_args[@]}") then
- fail="yes"
- fi
-
- if [ "${fail}" = yes ]; then
- exit 1
- fi
- ;;
-
- pytest-local-unit)
- fail=""
- mkdir -p ${TEST_DATA_DIR}
-
- if [ -z "$SOURCE_ROOT" ] ; then
- export SOURCE_ROOT="$PWD"
- fi
-
- if [ -z "$MODDIR" ] ; then
- export MODDIR="$PWD"
- fi
-
- if [ -z "$ENVOY_PATH" ] ; then
- export ENVOY_PATH="${MODDIR}/bin/envoy"
- fi
- if [ ! -f "$ENVOY_PATH" ] ; then
- echo "Envoy not found at ENVOY_PATH=$ENVOY_PATH"
- exit 1
- fi
-
- echo "$0: SOURCE_ROOT=$SOURCE_ROOT"
- echo "$0: MODDIR=$MODDIR"
- echo "$0: ENVOY_PATH=$ENVOY_PATH"
- if ! (cd ${MODDIR} && pytest --cov-branch --cov=ambassador --cov-report html:/tmp/cov_html --junitxml=${TEST_DATA_DIR}/pytest.xml --tb=short -rP "${pytest_args[@]}") then
- fail="yes"
- fi
-
- if [ "${fail}" = yes ]; then
- exit 1
- fi
- ;;
-
- pytest-internal)
- # This runs inside the builder image
- fail=""
- mkdir -p ${TEST_DATA_DIR}
- for MODDIR in $(find-modules); do
- if [ -e "${MODDIR}/python" ]; then
- if ! (cd ${MODDIR} && pytest --cov-branch --cov=ambassador --cov-report html:/tmp/cov_html --junitxml=${TEST_DATA_DIR}/pytest.xml --tb=short -ra "${pytest_args[@]}") then
- fail="yes"
- fi
- fi
- done
-
- if [ "${fail}" = yes ]; then
- exit 1
- fi
- ;;
- gotest-local)
- [ -n "${TEST_XML_DIR}" ] && mkdir -p ${TEST_XML_DIR}
- fail=""
- for MODDIR in ${GOTEST_MODDIRS} ; do
- if [ -e "${MODDIR}/go.mod" ]; then
- pkgs=$(cd ${MODDIR} && go list -f='{{ if or (gt (len .TestGoFiles) 0) (gt (len .XTestGoFiles) 0) }}{{ .ImportPath }}{{ end }}' ${GOTEST_PKGS})
- if [ -n "${pkgs}" ]; then
- modname=`basename ${MODDIR}`
- junitarg=
- if [[ -n "${TEST_XML_DIR}" ]] ; then
- junitarg="--junitfile ${TEST_XML_DIR}/${modname}-gotest.xml"
- fi
- if ! (cd ${MODDIR} && gotestsum ${junitarg} --rerun-fails=3 --format=testname --packages="${pkgs}" -- -v ${GOTEST_ARGS}) ; then
- fail="yes"
- fi
- fi
- fi
- done
-
- if [ "${fail}" = yes ]; then
- exit 1
- fi
- ;;
build-builder-base)
build_builder_base >&2
echo "${builder_base_image}"
;;
- shell)
- echo
- docker exec -it "$(builder)" /bin/bash
- ;;
*)
- echo "usage: builder.sh [bootstrap|builder|clean|clobber|compile|build-builder-base|shell]"
+ echo "usage: builder.sh [pip-compile|build-builder-base]"
exit 1
;;
esac
diff --git a/builder/builder_bash_rc b/builder/builder_bash_rc
deleted file mode 100644
index d30007aa2e..0000000000
--- a/builder/builder_bash_rc
+++ /dev/null
@@ -1,34 +0,0 @@
-alias echo_on="{ set -x; }"
-alias echo_off="{ set +x; } 2>/dev/null"
-
-# Choose colors carefully. If they don't work on both a black
-# background and a white background, pick other colors (so white,
-# yellow, and black are poor choices).
-RED='\033[1;31m'
-GRN='\033[1;32m'
-BLU='\033[1;34m'
-CYN='\033[1;36m'
-END='\033[0m'
-
-source /etc/profile.d/bash_completion.sh
-source /usr/local/bin/kube-ps1.sh
-
-alias k=kubectl
-complete -F __start_kubectl k
-
-# These are already accomodated by the kubectx/kubens completions.
-alias kctx=kubectx
-alias kns=kubens
-
-__prompt_info () {
- PS1="$(kube_ps1) ${BUILDER_NAME}:\w $ "
-}
-
-old=""
-
-if [ -n "$PROMPT_COMMAND" ]; then
- old="$PROMPT_COMMAND"
-fi
-
-PROMPT_COMMAND="__prompt_info;$old"
-
diff --git a/builder/server.crt b/builder/server.crt
deleted file mode 100644
index f14444e199..0000000000
--- a/builder/server.crt
+++ /dev/null
@@ -1,24 +0,0 @@
------BEGIN CERTIFICATE-----
-MIID/TCCAuWgAwIBAgIRALtiL2LmGk+hOxkWdYDr6p4wDQYJKoZIhvcNAQELBQAw
-gYYxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJNQTEPMA0GA1UEBxMGQm9zdG9uMRgw
-FgYDVQQKEw9BbWJhc3NhZG9yIExhYnMxFDASBgNVBAsTC0VuZ2luZWVyaW5nMSkw
-JwYDVQQDEyBrYXQtc2VydmVyLnRlc3QuZ2V0YW1iYXNzYWRvci5pbzAgFw0yMTEx
-MTAxMzEyMDBaGA8yMDk5MTExMDEzMTIwMFowgYYxCzAJBgNVBAYTAlVTMQswCQYD
-VQQIEwJNQTEPMA0GA1UEBxMGQm9zdG9uMRgwFgYDVQQKEw9BbWJhc3NhZG9yIExh
-YnMxFDASBgNVBAsTC0VuZ2luZWVyaW5nMSkwJwYDVQQDEyBrYXQtc2VydmVyLnRl
-c3QuZ2V0YW1iYXNzYWRvci5pbzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
-ggEBAOYsTtWT++txP8svst9oaUECcAA6EU1izlkVz5a5k4hTvvVGfnzgNEddICV+
-hwgl6jvvQOpbsTA2201/Ps70i8KFtlOtyVo5mtDnnTyl7Xlyn0gzSn8dAtmPYCIh
-enfEw9w897Y9FQ9iPnBFD215CNhe9thtp9zVgCmXg2fYgG+TTPSKin3umJZbNxxp
-cj3VIma1Uq98dDL2BaH6H3fxgdcOoP3ttQUqt2XxPzLq4H4QX5grxtknY/b4hh9h
-EQf4ilLAZkzQJI4JFXrpTCUfHKeAQMvUnR17EDjJjjWa8BI72UVZwNVuISWOmbjt
-RKm+BytbDe00bXez2bVU4o9NOnsCAwEAAaNiMGAwDgYDVR0PAQH/BAQDAgWgMBMG
-A1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAwKwYDVR0RBCQwIoIga2F0
-LXNlcnZlci50ZXN0LmdldGFtYmFzc2Fkb3IuaW8wDQYJKoZIhvcNAQELBQADggEB
-AH6E9OsPm6+K3SFmxeNJePgyoC+gb5KFAzAPmcGdmWzvn9vMev9SwY/Bowm2lb2C
-KM5KtrWxLKwT19U5QqkmbzabD8yjQfspQBKp3+yROpV2Uy2SFQjLpScqX++iiPW/
-KgVDd6AoT++eDduFV/4wheemDp/Ac3qW3oy9Wtb1l0YD3vsz/XXI67McGRIYyhc2
-mVV40T8Ejy+2k2Gu5HpmFNY9Yk5VZ/k9uFNafmctpT//90n/05qbf0wt+Fx1aiXm
-ONnkiDYcW4lYCIW5DavngN5x2CEEorErCzqwAAAnpjcUwmLYqP8F36eDhFXc5G64
-zSaWaB/9OGxW3jk9JK0Zdjg=
------END CERTIFICATE-----
diff --git a/builder/server.key b/builder/server.key
deleted file mode 100644
index 200d28b94f..0000000000
--- a/builder/server.key
+++ /dev/null
@@ -1,28 +0,0 @@
------BEGIN PRIVATE KEY-----
-MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDmLE7Vk/vrcT/L
-L7LfaGlBAnAAOhFNYs5ZFc+WuZOIU771Rn584DRHXSAlfocIJeo770DqW7EwNttN
-fz7O9IvChbZTrclaOZrQ5508pe15cp9IM0p/HQLZj2AiIXp3xMPcPPe2PRUPYj5w
-RQ9teQjYXvbYbafc1YApl4Nn2IBvk0z0iop97piWWzccaXI91SJmtVKvfHQy9gWh
-+h938YHXDqD97bUFKrdl8T8y6uB+EF+YK8bZJ2P2+IYfYREH+IpSwGZM0CSOCRV6
-6UwlHxyngEDL1J0dexA4yY41mvASO9lFWcDVbiEljpm47USpvgcrWw3tNG13s9m1
-VOKPTTp7AgMBAAECggEAdXHmR9RalqgShPqxqoz8J8+2lmcKrRHdeFdItP+757PT
-nBN8NvuvsTozTO5iuCWgfzR1Sx++KXvUKpbi1XayWW8guVHTi9YYyL9l26xOn8l8
-vLpvKK0E5z3hL4kllDcyOQ5i1voT5OWcNm0GkjVP7heGnKhXHrdRzechhbtSzZrD
-nLIIYtJ3yA64GdK7fKrQdHXm4Ioq/sKXDGA9+9Fut8Ya/WEgkRBCnFlwGnKkjFMS
-8woVp+LPeG/4Be60F/TDervl3EFMVCPNnf1cy84mTXLjwr1xzFi3w73IkpvsP6Zy
-ZUSjgMfomWo/UUxPN4D/PTIwQd6GCfRjA9vBkoEcAQKBgQDpGR7FZ/1g74XlFRpi
-lwv2IcoTkmW7W53NKaAN85EWr//JpS4MI/2zCD2SYum/GC7R7VOfzgzd6+S/2HZz
-0tFqeNvqltNIKws3JqWs9fwgxuY0WWtYeJ7kQMvhvKh6Skc7BXjH48vQsSKKS6sI
-cgSxl8IB59LRT8OjsoBHvhKr8QKBgQD8yZ3zeLT+nTq9Qz66xZrFoTnrZ60Mjf4l
-vMRPCUfXHdWePZYHi8ucZk5nk7gioXlxsXs9cY+wNy1e+XPYr5K0MCD835QwvUOs
-Vx6rm5PHfHVqWiY7gUIM4kSfXPxEkyR+zYsp5cmvAWJRHU65ncE+kAjl5WvWo/Bb
-TomZumzpKwKBgDtSqFEnKQYNj28A0uk6ddHbQZl/jaKEK4H2UQSb81Xpzthb0H9I
-fZ2dwEXMJJ78oZXNp5WpE/86tMnaZjMN+Tv2rElkvzrDsaQEku+U3ujclePGyhoH
-ALEoiHrXz6zn0H8F4npE0C85lYfKwClzExy7eaWQgUfB3BGCMUGQ7yGRAoGAMzwe
-AnHtpUly1VdiyHIym2nBFoBN56SD5VwsIaTM5cHLQSZUWTUxUIK/02hyQrqdpbiG
-NP7mU53rWTuQClQTlOfIV9hs7a/+nliOA9QlkKIeHi8lXpwlKf1WUUKEctWr6PN6
-CETlqs3KAozBHwflWevRkd79C2Tw22DArOLHHj8CgYBzE3OMd99DsYIZRAjvqq5b
-W0G0bvV8oSg6U2sLIW1zXndE4Oze5DCGxt81KDRhI7Ys0SGV39BhxlO/enZDmWJx
-jIYTjNPdtCXc8mFe1K6OvAsCRM0chZCCp3MBAK6HWC2d7YckMJqlT6qdCz6o2sJF
-eMyj69OupAXLMi5eISfHWQ==
------END PRIVATE KEY-----
diff --git a/builder/sync-excludes.txt b/builder/sync-excludes.txt
deleted file mode 100644
index 8e7823681b..0000000000
--- a/builder/sync-excludes.txt
+++ /dev/null
@@ -1,34 +0,0 @@
-*.tmp
-*.log
-*.stamp
-*.tap
-*.webm
-*.o
-.git
-*.egg-info
-__pycache__
-.pytest_cache
-.mypy_cache
-.dmypy.json
-.coverage
-go.sum
-
-bin/
-node_modules/
-venv/
-
-# absolute names
-/Makefile
-/builder/
-/cxx/envoy/
-/_cxx/envoy/
-/vendor/
-/docker/
-/build-aux/*.mk
-
-# absolute names (apro.git)
-/aes-example-plugins/
-/ambassador/
-/bin_darwin_amd64/
-/bin_linux_amd64/
-/tests/cluster/go-test/consul/new_root.*
diff --git a/charts/charts.mk b/charts/charts.mk
index 61ae29fc4e..5c2277606d 100644
--- a/charts/charts.mk
+++ b/charts/charts.mk
@@ -1,7 +1,6 @@
EMISSARY_CHART = $(OSS_HOME)/charts/emissary-ingress
-push-preflight: create-venv $(tools/yq)
- @$(OSS_HOME)/venv/bin/python -m pip install ruamel.yaml
+push-preflight: $(OSS_HOME)/venv $(tools/yq)
.PHONY: push-preflight
release/ga/chart-push:
diff --git a/charts/emissary-ingress/templates/deployment.yaml b/charts/emissary-ingress/templates/deployment.yaml
index 1a4c6a4415..0d65ac127f 100644
--- a/charts/emissary-ingress/templates/deployment.yaml
+++ b/charts/emissary-ingress/templates/deployment.yaml
@@ -195,6 +195,8 @@ spec:
fieldPath: metadata.namespace
{{- end -}}
{{- if .Values.env }}
+ - name: AGENT_CONFIG_RESOURCE_NAME
+ value: {{ include "ambassador.fullname" . }}-agent-cloud-token
{{- range $key,$value := .Values.env }}
- name: {{ $key | upper | quote}}
value: {{ $value | quote}}
diff --git a/charts/emissary-ingress/templates/rbac.yaml b/charts/emissary-ingress/templates/rbac.yaml
index 8d666fa338..b177db0d78 100644
--- a/charts/emissary-ingress/templates/rbac.yaml
+++ b/charts/emissary-ingress/templates/rbac.yaml
@@ -85,6 +85,7 @@ rules:
- namespaces
- services
- secrets
+ - configmaps
- endpoints
verbs: ["get", "list", "watch"]
diff --git a/cmd/entrypoint/env.go b/cmd/entrypoint/env.go
index c4769f5421..739ed0e3cb 100644
--- a/cmd/entrypoint/env.go
+++ b/cmd/entrypoint/env.go
@@ -272,6 +272,14 @@ func GetLicenseSecretNamespace() string {
return env("AMBASSADOR_AES_SECRET_NAMESPACE", GetAmbassadorNamespace())
}
+func GetCloudConnectTokenResourceName() string {
+ return env("AGENT_CONFIG_RESOURCE_NAME", "ambassador-agent-cloud-token")
+}
+
+func GetCloudConnectTokenResourceNamespace() string {
+ return env("AGENT_NAMESPACE", GetAmbassadorNamespace())
+}
+
func GetEventHost() string {
return env("DEV_AMBASSADOR_EVENT_HOST", fmt.Sprintf("http://localhost:%s", GetDiagdBindPort()))
}
diff --git a/cmd/entrypoint/interesting_types.go b/cmd/entrypoint/interesting_types.go
index 09cf633198..0d35464e7d 100644
--- a/cmd/entrypoint/interesting_types.go
+++ b/cmd/entrypoint/interesting_types.go
@@ -60,6 +60,7 @@ func GetInterestingTypes(ctx context.Context, serverTypeList []kates.APIResource
if fs != "" {
endpointFs += fmt.Sprintf(",%s", fs)
}
+ configMapFs := fmt.Sprintf("metadata.namespace=%s", GetCloudConnectTokenResourceNamespace())
// We set interestingTypes to the list of types that we'd like to watch (if that type exits
// in this cluster).
@@ -82,6 +83,7 @@ func GetInterestingTypes(ctx context.Context, serverTypeList []kates.APIResource
"Services": {{typename: "services.v1."}}, // New in Kubernetes 0.16.0 (2015-04-28) (v1beta{1..3} before that)
"Endpoints": {{typename: "endpoints.v1.", fieldselector: endpointFs}}, // New in Kubernetes 0.16.0 (2015-04-28) (v1beta{1..3} before that)
"K8sSecrets": {{typename: "secrets.v1."}}, // New in Kubernetes 0.16.0 (2015-04-28) (v1beta{1..3} before that)
+ "ConfigMaps": {{typename: "configmaps.v1.", fieldselector: configMapFs}},
"Ingresses": {
{typename: "ingresses.v1beta1.extensions"}, // New in Kubernetes 1.2.0 (2016-03-16), gone in Kubernetes 1.22.0 (2021-08-04)
{typename: "ingresses.v1beta1.networking.k8s.io"}, // New in Kubernetes 1.14.0 (2019-03-25), gone in Kubernetes 1.22.0 (2021-08-04)
diff --git a/cmd/entrypoint/resource_validator.go b/cmd/entrypoint/resource_validator.go
index 75177c1ccd..d251cfff7d 100644
--- a/cmd/entrypoint/resource_validator.go
+++ b/cmd/entrypoint/resource_validator.go
@@ -21,16 +21,14 @@ func newResourceValidator() (*resourceValidator, error) {
}
func (v *resourceValidator) isValid(ctx context.Context, un *kates.Unstructured) bool {
- key := string(un.GetUID())
err := v.katesValidator.Validate(ctx, un)
+
if err != nil {
dlog.Errorf(ctx, "validation error: %s %s/%s -- %s", un.GetKind(), un.GetNamespace(), un.GetName(), err.Error())
- copy := un.DeepCopy()
- copy.Object["errors"] = err.Error()
- v.invalid[key] = copy
+ v.addInvalid(ctx, un, err.Error())
return false
} else {
- delete(v.invalid, key)
+ v.removeInvalid(ctx, un)
return true
}
}
@@ -42,3 +40,20 @@ func (v *resourceValidator) getInvalid() []*kates.Unstructured {
}
return result
}
+
+// The addInvalid method adds a resource to the Validator's list of invalid
+// resources.
+func (v *resourceValidator) addInvalid(ctx context.Context, un *kates.Unstructured, errorMessage string) {
+ key := string(un.GetUID())
+
+ copy := un.DeepCopy()
+ copy.Object["errors"] = errorMessage
+ v.invalid[key] = copy
+}
+
+// The removeInvalid method removes a resource from the Validator's list of
+// invalid resources.
+func (v *resourceValidator) removeInvalid(ctx context.Context, un *kates.Unstructured) {
+ key := string(un.GetUID())
+ delete(v.invalid, key)
+}
diff --git a/cmd/entrypoint/secrets.go b/cmd/entrypoint/secrets.go
index 93d8f8626c..0da3c52192 100644
--- a/cmd/entrypoint/secrets.go
+++ b/cmd/entrypoint/secrets.go
@@ -2,19 +2,167 @@ package entrypoint
import (
"context"
+ "crypto/x509"
+ "encoding/json"
+ "encoding/pem"
+ "fmt"
"strings"
amb "github.com/datawire/ambassador/v2/pkg/api/getambassador.io/v3alpha1"
"github.com/datawire/ambassador/v2/pkg/kates"
"github.com/datawire/ambassador/v2/pkg/kates/k8s_resource_types"
snapshotTypes "github.com/datawire/ambassador/v2/pkg/snapshot/v1"
+ "github.com/datawire/dlib/derror"
"github.com/datawire/dlib/dlog"
+ v1 "k8s.io/api/core/v1"
)
+// checkSecret checks whether a secret is valid, and adds it to the list of secrets
+// in this snapshot if so.
+func checkSecret(
+ ctx context.Context,
+ sh *SnapshotHolder,
+ what string,
+ ref snapshotTypes.SecretRef,
+ secret *v1.Secret) {
+ // Make it more convenient to consistently refer to this secret.
+ secretName := fmt.Sprintf("%s secret %s.%s", what, ref.Name, ref.Namespace)
+
+ if secret == nil {
+ // This is "impossible". Arguably it should be a panic...
+ dlog.Debugf(ctx, "%s not found", secretName)
+ return
+ }
+
+ // Assume that the secret is valid...
+ isValid := true
+
+ // ...and that we have no errors.
+ var errs derror.MultiError
+
+ // OK, do we have a TLS private key?
+ privKeyPEMBytes, ok := secret.Data[v1.TLSPrivateKeyKey]
+
+ if ok && len(privKeyPEMBytes) > 0 {
+ // Yes. We need to be able to decode it.
+ caKeyBlock, _ := pem.Decode(privKeyPEMBytes)
+
+ if caKeyBlock != nil {
+ dlog.Debugf(ctx, "%s has private key, block type %s", secretName, caKeyBlock.Type)
+
+ // First try PKCS1.
+ _, err := x509.ParsePKCS1PrivateKey(caKeyBlock.Bytes)
+
+ if err != nil {
+ // Try PKCS8? (No, = instead of := is not a typo here: we're overwriting the
+ // earlier error.)
+ _, err = x509.ParsePKCS8PrivateKey(caKeyBlock.Bytes)
+ }
+
+ // Any issues here?
+ if err != nil {
+ errs = append(errs,
+ fmt.Errorf("%s %s cannot be parsed as PKCS1 or PKCS8: %s", secretName, v1.TLSPrivateKeyKey, err.Error()))
+ isValid = false
+ }
+ } else {
+ errs = append(errs,
+ fmt.Errorf("%s %s is not a PEM-encoded key", secretName, v1.TLSPrivateKeyKey))
+ isValid = false
+ }
+ }
+
+ // How about a TLS cert bundle?
+ caCertPEMBytes, ok := secret.Data[v1.TLSCertKey]
+
+ if ok && len(caCertPEMBytes) > 0 {
+ caCertBlock, _ := pem.Decode(caCertPEMBytes)
+
+ if caCertBlock != nil {
+ dlog.Debugf(ctx, "%s has public key, block type %s", secretName, caCertBlock.Type)
+
+ _, err := x509.ParseCertificate(caCertBlock.Bytes)
+
+ if err != nil {
+ errs = append(errs,
+ fmt.Errorf("%s %s cannot be parsed as x.509: %s", secretName, v1.TLSCertKey, err.Error()))
+ isValid = false
+ }
+ } else {
+ errs = append(errs,
+ fmt.Errorf("%s %s is not a PEM-encoded certificate", secretName, v1.TLSCertKey))
+ isValid = false
+ }
+ }
+
+ if isValid {
+ dlog.Debugf(ctx, "taking %s", secretName)
+ sh.k8sSnapshot.Secrets = append(sh.k8sSnapshot.Secrets, secret)
+ } else {
+ // This secret is invalid, but we're not going to log about it -- instead, it'll go into the
+ // list of Invalid resources.
+ dlog.Debugf(ctx, "%s is not valid, skipping: %s", secretName, errs.Error())
+
+ // We need to add this to our set of invalid resources. Sadly, this means we need to convert it
+ // to an Unstructured and redact various bits.
+ secretBytes, err := json.Marshal(secret)
+
+ if err != nil {
+ // This we'll log about, since it's impossible.
+ dlog.Errorf(ctx, "unable to marshal invalid %s: %s", secretName, err)
+ return
+ }
+
+ var unstructuredSecret kates.Unstructured
+ err = json.Unmarshal(secretBytes, &unstructuredSecret)
+
+ if err != nil {
+ // This we'll log about, since it's impossible.
+ dlog.Errorf(ctx, "unable to unmarshal invalid %s: %s", secretName, err)
+ return
+ }
+
+ // Construct a redacted version of things in the original data map.
+ redactedData := map[string]interface{}{}
+
+ for key := range secret.Data {
+ redactedData[key] = "-redacted-"
+ }
+
+ unstructuredSecret.Object["data"] = redactedData
+
+ // We have to toss the last-applied-configuration as well... and we may as well toss the
+ // managedFields.
+
+ metadata, ok := unstructuredSecret.Object["metadata"].(map[string]interface{})
+
+ if ok {
+ delete(metadata, "managedFields")
+
+ annotations, ok := metadata["annotations"].(map[string]interface{})
+
+ if ok {
+ delete(annotations, "kubectl.kubernetes.io/last-applied-configuration")
+
+ if len(annotations) == 0 {
+ delete(metadata, "annotations")
+ }
+ }
+
+ if len(metadata) == 0 {
+ delete(unstructuredSecret.Object, "metadata")
+ }
+ }
+
+ // Finally, mark it invalid.
+ sh.validator.addInvalid(ctx, &unstructuredSecret, errs.Error())
+ }
+}
+
// ReconcileSecrets figures out which secrets we're actually using,
// since we don't want to send secrets to Ambassador unless we're
// using them, since any secret we send will be saved to disk.
-func ReconcileSecrets(ctx context.Context, s *snapshotTypes.KubernetesSnapshot) error {
+func ReconcileSecrets(ctx context.Context, sh *SnapshotHolder) error {
// Start by building up a list of all the K8s objects that are
// allowed to mention secrets. Note that we vet the ambassador_id
// for all of these before putting them on the list.
@@ -25,7 +173,7 @@ func ReconcileSecrets(ctx context.Context, s *snapshotTypes.KubernetesSnapshot)
// them earlier so that we can treat them like any other resource
// here).
- for _, list := range s.Annotations {
+ for _, list := range sh.k8sSnapshot.Annotations {
for _, a := range list {
if _, isInvalid := a.(*kates.Unstructured); isInvalid {
continue
@@ -38,7 +186,7 @@ func ReconcileSecrets(ctx context.Context, s *snapshotTypes.KubernetesSnapshot)
// Hosts are a little weird, because we have two ways to find the
// ambassador_id. Sorry about that.
- for _, h := range s.Hosts {
+ for _, h := range sh.k8sSnapshot.Hosts {
var id amb.AmbassadorID
if len(h.Spec.AmbassadorID) > 0 {
id = h.Spec.AmbassadorID
@@ -49,17 +197,17 @@ func ReconcileSecrets(ctx context.Context, s *snapshotTypes.KubernetesSnapshot)
}
// TLSContexts, Modules, and Ingresses are all straightforward.
- for _, t := range s.TLSContexts {
+ for _, t := range sh.k8sSnapshot.TLSContexts {
if include(t.Spec.AmbassadorID) {
resources = append(resources, t)
}
}
- for _, m := range s.Modules {
+ for _, m := range sh.k8sSnapshot.Modules {
if include(m.Spec.AmbassadorID) {
resources = append(resources, m)
}
}
- for _, i := range s.Ingresses {
+ for _, i := range sh.k8sSnapshot.Ingresses {
resources = append(resources, i)
}
@@ -109,6 +257,9 @@ func ReconcileSecrets(ctx context.Context, s *snapshotTypes.KubernetesSnapshot)
findSecretRefs(ctx, resource, secretNamespacing, action)
}
+	// We _always_ have an implicit reference to the cloud-connect-token secret...
+ secretRef(GetCloudConnectTokenResourceNamespace(), GetCloudConnectTokenResourceName(), false, action)
+
// We _always_ have an implicit references to the fallback cert secret...
secretRef(GetAmbassadorNamespace(), "fallback-self-signed-cert", false, action)
@@ -128,27 +279,25 @@ func ReconcileSecrets(ctx context.Context, s *snapshotTypes.KubernetesSnapshot)
// The way this works is kind of simple: first we check everything in
// FSSecrets. Then, when we check K8sSecrets, we skip any secrets that are
// also in FSSecrets. End result: FSSecrets wins if there are any conflicts.
- s.Secrets = make([]*kates.Secret, 0, len(refs))
+ sh.k8sSnapshot.Secrets = make([]*kates.Secret, 0, len(refs))
- for ref, secret := range s.FSSecrets {
+ for ref, secret := range sh.k8sSnapshot.FSSecrets {
if refs[ref] {
- dlog.Debugf(ctx, "Taking FSSecret %#v", ref)
- s.Secrets = append(s.Secrets, secret)
+ checkSecret(ctx, sh, "FSSecret", ref, secret)
}
}
- for _, secret := range s.K8sSecrets {
+ for _, secret := range sh.k8sSnapshot.K8sSecrets {
ref := snapshotTypes.SecretRef{Namespace: secret.GetNamespace(), Name: secret.GetName()}
- _, found := s.FSSecrets[ref]
+ _, found := sh.k8sSnapshot.FSSecrets[ref]
if found {
dlog.Debugf(ctx, "Conflict! skipping K8sSecret %#v", ref)
continue
}
if refs[ref] {
- dlog.Debugf(ctx, "Taking K8sSecret %#v", ref)
- s.Secrets = append(s.Secrets, secret)
+ checkSecret(ctx, sh, "K8sSecret", ref, secret)
}
}
return nil
diff --git a/cmd/entrypoint/testdata/FakeHello.yaml b/cmd/entrypoint/testdata/FakeHello.yaml
index 4970889a6a..1196ea5ef2 100644
--- a/cmd/entrypoint/testdata/FakeHello.yaml
+++ b/cmd/entrypoint/testdata/FakeHello.yaml
@@ -1,5 +1,29 @@
---
apiVersion: getambassador.io/v3alpha1
+kind: Listener
+metadata:
+ name: ambassador-https-listener
+spec:
+ port: 8443
+ protocol: HTTPS
+ securityModel: XFP
+ hostBinding:
+ namespace:
+ from: ALL
+---
+apiVersion: getambassador.io/v3alpha1
+kind: Listener
+metadata:
+ name: ambassador-http-listener
+spec:
+ port: 8080
+ protocol: HTTP
+ securityModel: XFP
+ hostBinding:
+ namespace:
+ from: ALL
+---
+apiVersion: getambassador.io/v3alpha1
kind: Mapping
metadata:
name: hello
@@ -7,3 +31,43 @@ metadata:
spec:
prefix: /hello
service: hello
+---
+# This is a good Secret.
+apiVersion: v1
+kind: Secret
+metadata:
+ name: tls-cert
+type: kubernetes.io/tls
+data:
+ tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNzRENDQVpnQ0NRRFd2TnRjRzNpelZEQU5CZ2txaGtpRzl3MEJBUXNGQURBYU1SZ3dGZ1lEVlFRRERBOWgKYldKaGMzTmhaRzl5TFdObGNuUXdIaGNOTWpFd056QTRNakF5T0RNd1doY05Nakl3TnpBNE1qQXlPRE13V2pBYQpNUmd3RmdZRFZRUUREQTloYldKaGMzTmhaRzl5TFdObGNuUXdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCCkR3QXdnZ0VLQW9JQkFRQ1pVbXhqT1lrTWlKRm0yZSttZDlMelNwd0oxSWlic1lUWHp5a1NiMExZYlNqcG5jMGoKV0dWMEppOXdlU3FSSFFPMHM4NUZreENzT2s1K2ZCWDFJOTYra1Z2V3NyeWgwcDlsdjI3ZUpHZFp1Q1ZsSmR3cApuYnBaWFF6R3JjWVVaeTA2WEVWOGxkaFdOSVhMazc1bmxsWmE5M2xjajRXRzNTRHpzT2MrdEtWaEtNaG9QSkVaClVGbXNxZ080dm8yZkJxYk0zNXhBT3lFSHhodXgvVlNLeVIxbHN0S0dsd25icGliZDc2UUZCdWYwbHN2bEJRTFAKV2xiRW8zZzI0NWxMNFhMWjg2UURoaTJseTdSNFN5em4yZ2E2TjZYQWNxMjFYTzNQUzhPaFp6d2J1cGpEMHRadApxL0JjY01kTElXbm9zVmlpc0FVdElLUHpCbjVkNFhBaGRtVnhBZ01CQUFFd0RRWUpLb1pJaHZjTkFRRUxCUUFECmdnRUJBSmFONUVxcTlqMi9IVnJWWk9wT3BuWVRSZlU0OU1pNDlvbkF1ZjlmQk9tR3dlMHBWSmpqTVlyQW9kZ1IKYWVyUHVWUlBEWGRzZXczejJkMjliRzBMVTJTdEpBMEY0Z05vWTY0bGVZUTN0RjFDUmxsczdKaWVWelN1RVVyUwpLZjZiaWJ0aUlLSU4waEdTV3R2YU04ZXhqb2Y3ZGUyeWFLNEVPeE1pQmJyZkFPNnJ6MXgzc1ovOENGTnp3OXNRClhCNWpZSWhNZWhsb2xhR0U5RGNydUdrbStFQ3ZCNjZkajFNcm5UamVJcWc4QnN4Wm5WYlZ4cDlUZTJRZ2hyTmkKckVySndjV1NSU3lUZzBEZXdUektYQUx2aW5iRTliZ3pNdFhNSEhkUmZQYUMvWmFCTUd1QXExeWJTOUV3M2MvWgo1dk00aFdOaHU5MS9DSmN5UVJHdlJRWXFiZTA9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
+ tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBbVZKc1l6bUpESWlSWnRudnBuZlM4MHFjQ2RTSW03R0UxODhwRW05QzJHMG82WjNOCkkxaGxkQ1l2Y0hrcWtSMER0TFBPUlpNUXJEcE9mbndWOVNQZXZwRmIxcks4b2RLZlpiOXUzaVJuV2JnbFpTWGMKS1oyNldWME14cTNHRkdjdE9seEZmSlhZVmpTRnk1TytaNVpXV3ZkNVhJK0ZodDBnODdEblByU2xZU2pJYUR5UgpHVkJacktvRHVMNk5ud2Ftek4rY1FEc2hCOFlic2YxVWlza2RaYkxTaHBjSjI2WW0zZStrQlFibjlKYkw1UVVDCnoxcFd4S040TnVPWlMrRnkyZk9rQTRZdHBjdTBlRXNzNTlvR3VqZWx3SEt0dFZ6dHowdkRvV2M4RzdxWXc5TFcKYmF2d1hIREhTeUZwNkxGWW9yQUZMU0NqOHdaK1hlRndJWFpsY1FJREFRQUJBb0lCQUdlaVNOVUE3TnZsNjdKRAptVE5DUnZwZjhmekxCZE9IT0MzUFB3blEzclAvaE9uejJkY01SdmN0WUY5NzV3UFRRcy8vd1d0UnJyRmJiL2NhCjFKU3dQRDAvYjM0OXJqY0xjT2FMY05zQ2JFRStzVGdmVVNOb0U2K1hyNjBUaEpJQjg1WkJERTdiMGpEaXE1VWgKTmxBNlZBQ0V5aW1BY1ZicFhQNmJFcE5WODNzcDFBUEUrc2xpUWVrMHBWK2VJcFNuWGNkMWRNbjdhcHNuYmR3MgpBbDErRDBiTkJweUNSd1dCMm81dmh0ZzIrcndaQUNOdTFQdmJGc0g5bURGUit2elJBT1oycFRDMzRwWDBhcktECnUyMGFMTU1PT2NETWN0bWp2OHJrcVJVRWt6aTNuV0ljWVVVYXFKVG1Ub2RLZlRobXhsbGx5aDg5UVAzUG8raEwKYWk0b0VJa0NnWUVBeUcvQ2xaa3g4WHF3M0NrZXJrZlZoTm83OHIxWFVFRFE3U3dMUUllaHo1Ri9iRU9WRS8yUgpJeGFZQkx2alMwRkRzY0s3TkpPeU9kTVBpU1VNcHdMSC9kNnNrWjA2QWRTVllPbUJpNUFCMUJNZXk1b0cvSmtXClpzSm42Q3g5aEJUZTVzQnRCUWQ1K1phUXU4aDBhUFcwcFh3b1h5aW1JejNpZ3Vxdk1Dc3plNU1DZ1lFQXc5TWQKY2ZmK1FhbmxqMnVpcmcxbnljMFVkN1h6ekFZRXVMUERPdHltcm1SSU9BNGl4c3JzMzRmbVE4STc4cXpDMnhmNQpEdlJPNTNzMW9XSHNzbXdxcmgzQ0RVaDF2UEVEcHVqR3dLd2E4bE1yQ2piWDhtYk1ibVNyelBuczVWeVhXaEhFCkN3VHNPV3RleUZ3OVFkZTR1K011SEYzSHB0SHFvZlRFTGZJRXBXc0NnWUVBdVBPM3dFZGVlSTlZUjYrQjZodkwKQVE1SHB4UGtUOStmYWxyci94Mm95RnBnRkV6QWNYUFh5Mkw3MzlKb1NIYnV1a2NRYTlHbDhnbTZHamtmMWJTUgpTc2VBd2RVdFE2Y2dPQThBUlFJYlRkQmU2RTAzQ1R0U0dueGxXUzVFbSs2T1NLdGpiZkthTVI4b2FyN3IvRFpOCi9TMzJLdWpkZFVPVGttNXdQYWgvbHhVQ2dZQmh3N0dNcDZJQmlGKzZaYU5YUUF3VC9OWCtHaEg0UnZ6dWRaaS8KZDArait4N3ZGV2VaVmRCQ25PZUI1cVBsT1Frak51bTU1SkRNRW9BbzdPbXQva0Nrb3VpeGx2NW84TzdBMHEvLwpteXpzMUViRmw3SGlMQjVkOHRhdXhBdllTb3lwZy9zYkFUOHFQNGVYZ2kxM0JNc095cEhIeWE0V2cvQ2ZJTU1jCnFScFd0d0tCZ0hYRjVSWUo4alpLTnE2bHI5TVZhZ
FRXdkpRZ01VbHR0UWlaM3ZrMmc0S09Kc1NWdWtEbjFpZysKQ0NKZUU2VS9OS0N3ejJSMXBjSWVET3dwek9IYzJWNkt4Z0RYZUYyVWsvMjMydlB3aXRjVExhS2hsTTlDOGNLcwp6RGlJcVFkZDRLdFhDajc4S040TlhHZ1hJdVdXOHZERFY4Q05wQm45eUlUUXFST3NRSHRrCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
+---
+# This Host references our good Secret, so it'll get loaded.
+apiVersion: getambassador.io/v3alpha1
+kind: Host
+metadata:
+ name: wildcard-host
+spec:
+ hostname: "*"
+ tlsSecret:
+ name: tls-cert
+---
+# This is an invalid Secret.
+apiVersion: v1
+kind: Secret
+metadata:
+ name: tls-broken-cert
+type: kubernetes.io/tls
+data:
+ tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNzRENDQVpnQ0NRRFd2TnRjRzNpelZEQU5CZ2txaGtpRzl3MEJBUXNGQURBYU1SZ3dGZ1lEVlFRRERBOWgKYldKaGMzTmhaRzl5TFdObGNuUXdIaGNOTWpFd056QTRNakF5T0RNd1doY05Nakl3TnpBNE1qQXlPRE13V2pBYQpNUmd3RmdZRFZRUUREQTloYldKaGMzTmhaRzl5TFdObGNuUXdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCCkR3QXdnZ0VLQW9JQkFRQ1pVbXhqT1lrTWlKRm0yZSttZDlMelNwd0oxSWlic1lUWHp5a1NiMExZYlNqcG5jMGoKZ2dFQkFKYU41RXFxOWoyL0hWclZaT3BPcG5ZVFJmVTQ5TWk0OW9uQXVmOWZCT21Hd2UwcFZKampNWXJBb2RnUgphZXJQdVZSUERYZHNldzN6MmQyOWJHMExVMlN0SkEwRjRnTm9ZNjRsZVlRM3RGMUNSbGxzN0ppZVZ6U3VFVXJTCktmNmJpYnRpSUtJTjBoR1NXdHZhTThleGpvZjdkZTJ5YUs0RU94TWlCYnJmQU82cnoxeDNzWi84Q0ZOenc5c1EKWEI1allJaE1laGxvbGFHRTlEY3J1R2ttK0VDdkI2NmRqMU1yblRqZUlxZzhCc3hablZiVnhwOVRlMlFnaHJOaQpyRXJKd2NXU1JTeVRnMERld1R6S1hBTHZpbmJFOWJnek10WE1ISGRSZlBhQy9aYUJNR3VBcTF5YlM5RXczYy9aCjV2TTRoV05odTkxL0NKY3lRUkd2UlFZcWJlMD0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
+ tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBbVZKc1l6bUpESWlSWnRudnBuZlM4MHFjQ2RTSW03R0UxODhwRW05QzJHMG82WjNOCkkxaGxkQ1l2Y0hrcWtSMER0TFBPUlpNUXJEcE9mbndWOVNQZXZwRmIxcks4b2RLZlpiOXUzaVJuV2JnbFpTWGMKS1oyNldWME14cTNHRkdjdE9seEZmSlhZVmpTRnk1TytaNVpXV3ZkNVhJK0ZodDBnODdEblByU2xZU2pJYUR5UgpHVkJacktvRHVMNk5ud2Ftek4rY1FEc2hCOFlic2YxVWlza2RaYkxTaHBjSjI2WW0zZStrQlFibjlKYkw1UVVDCkFsMStEMGJOQnB5Q1J3V0IybzV2aHRnMityd1pBQ051MVB2YkZzSDltREZSK3Z6UkFPWjJwVEMzNHBYMGFyS0QKdTIwYUxNTU9PY0RNY3RtanY4cmtxUlVFa3ppM25XSWNZVVVhcUpUbVRvZEtmVGhteGxsbHloODlRUDNQbytoTAphaTRvRUlrQ2dZRUF5Ry9DbFpreDhYcXczQ2tlcmtmVmhObzc4cjFYVUVEUTdTd0xRSWVoejVGL2JFT1ZFLzJSCkl4YVlCTHZqUzBGRHNjSzdOSk95T2RNUGlTVU1wd0xIL2Q2c2taMDZBZFNWWU9tQmk1QUIxQk1leTVvRy9Ka1cKWnNKbjZDeDloQlRlNXNCdEJRZDUrWmFRdThoMGFQVzBwWHdvWHlpbUl6M2lndXF2TUNzemU1TUNnWUVBdzlNZApjZmYrUWFubGoydWlyZzFueWMwVWQ3WHp6QVlFdUxQRE90eW1ybVJJT0E0aXhzcnMzNGZtUThJNzhxekMyeGY1CkR2Uk81M3Mxb1dIc3Ntd3FyaDNDRFVoMXZQRURwdWpHd0t3YThsTXJDamJYOG1iTWJtU3J6UG5zNVZ5WFdoSEUKQ3dUc09XdGV5Rnc5UWRlNHUrTXVIRjNIcHRIcW9mVEVMZklFcFdzQ2dZRUF1UE8zd0VkZWVJOVlSNitCNmh2TApBUTVIcHhQa1Q5K2ZhbHJyL3gyb3lGcGdGRXpBY1hQWHkyTDczOUpvU0hidXVrY1FhOUdsOGdtNkdqa2YxYlNSClNzZUF3ZFV0UTZjZ09BOEFSUUliVGRCZTZFMDNDVHRTR254bFdTNUVtKzZPU0t0amJmS2FNUjhvYXI3ci9EWk4KL1MzMkt1amRkVU9Ua201d1BhaC9seFVDZ1lCaHc3R01wNklCaUYrNlphTlhRQXdUL05YK0doSDRSdnp1ZFppLwpkMCtqK3g3dkZXZVpWZEJDbk9lQjVxUGxPUWtqTnVtNTVKRE1Fb0FvN09tdC9rQ2tvdWl4bHY1bzhPN0EwcS8vCm15enMxRWJGbDdIaUxCNWQ4dGF1eEF2WVNveXBnL3NiQVQ4cVA0ZVhnaTEzQk1zT3lwSEh5YTRXZy9DZklNTWMKcVJwV3R3S0JnSFhGNVJZSjhqWktOcTZscjlNVmFkVFd2SlFnTVVsdHRRaVozdmsyZzRLT0pzU1Z1a0RuMWlnKwpDQ0plRTZVL05LQ3d6MlIxcGNJZURPd3B6T0hjMlY2S3hnRFhlRjJVay8yMzJ2UHdpdGNUTGFLaGxNOUM4Y0tzCnpEaUlxUWRkNEt0WENqNzhLTjROWEdnWEl1V1c4dkREVjhDTnBCbjl5SVRRcVJPc1FIdGsKLS0tLS1FTkQgUlNBIFBSSVZBVEUgS0VZLS0tLS0=
+---
+# This Host references our invalid Secret, so Emissary will pick it up -- and flag it as invalid.
+apiVersion: getambassador.io/v3alpha1
+kind: Host
+metadata:
+ name: broken-host
+spec:
+ hostname: "*"
+ tlsSecret:
+ name: tls-broken-cert
diff --git a/cmd/entrypoint/testdata/tls-snap.yaml b/cmd/entrypoint/testdata/tls-snap.yaml
index 4ba91e04f3..c0cae82a4a 100644
--- a/cmd/entrypoint/testdata/tls-snap.yaml
+++ b/cmd/entrypoint/testdata/tls-snap.yaml
@@ -1,8 +1,8 @@
---
apiVersion: v1
data:
- tls.crt: dGVzdC1jZXJ0Cg==
- tls.key: dGVzdC1rZXkK
+ tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNzRENDQVpnQ0NRRFd2TnRjRzNpelZEQU5CZ2txaGtpRzl3MEJBUXNGQURBYU1SZ3dGZ1lEVlFRRERBOWgKYldKaGMzTmhaRzl5TFdObGNuUXdIaGNOTWpFd056QTRNakF5T0RNd1doY05Nakl3TnpBNE1qQXlPRE13V2pBYQpNUmd3RmdZRFZRUUREQTloYldKaGMzTmhaRzl5TFdObGNuUXdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCCkR3QXdnZ0VLQW9JQkFRQ1pVbXhqT1lrTWlKRm0yZSttZDlMelNwd0oxSWlic1lUWHp5a1NiMExZYlNqcG5jMGoKV0dWMEppOXdlU3FSSFFPMHM4NUZreENzT2s1K2ZCWDFJOTYra1Z2V3NyeWgwcDlsdjI3ZUpHZFp1Q1ZsSmR3cApuYnBaWFF6R3JjWVVaeTA2WEVWOGxkaFdOSVhMazc1bmxsWmE5M2xjajRXRzNTRHpzT2MrdEtWaEtNaG9QSkVaClVGbXNxZ080dm8yZkJxYk0zNXhBT3lFSHhodXgvVlNLeVIxbHN0S0dsd25icGliZDc2UUZCdWYwbHN2bEJRTFAKV2xiRW8zZzI0NWxMNFhMWjg2UURoaTJseTdSNFN5em4yZ2E2TjZYQWNxMjFYTzNQUzhPaFp6d2J1cGpEMHRadApxL0JjY01kTElXbm9zVmlpc0FVdElLUHpCbjVkNFhBaGRtVnhBZ01CQUFFd0RRWUpLb1pJaHZjTkFRRUxCUUFECmdnRUJBSmFONUVxcTlqMi9IVnJWWk9wT3BuWVRSZlU0OU1pNDlvbkF1ZjlmQk9tR3dlMHBWSmpqTVlyQW9kZ1IKYWVyUHVWUlBEWGRzZXczejJkMjliRzBMVTJTdEpBMEY0Z05vWTY0bGVZUTN0RjFDUmxsczdKaWVWelN1RVVyUwpLZjZiaWJ0aUlLSU4waEdTV3R2YU04ZXhqb2Y3ZGUyeWFLNEVPeE1pQmJyZkFPNnJ6MXgzc1ovOENGTnp3OXNRClhCNWpZSWhNZWhsb2xhR0U5RGNydUdrbStFQ3ZCNjZkajFNcm5UamVJcWc4QnN4Wm5WYlZ4cDlUZTJRZ2hyTmkKckVySndjV1NSU3lUZzBEZXdUektYQUx2aW5iRTliZ3pNdFhNSEhkUmZQYUMvWmFCTUd1QXExeWJTOUV3M2MvWgo1dk00aFdOaHU5MS9DSmN5UVJHdlJRWXFiZTA9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
+ tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBbVZKc1l6bUpESWlSWnRudnBuZlM4MHFjQ2RTSW03R0UxODhwRW05QzJHMG82WjNOCkkxaGxkQ1l2Y0hrcWtSMER0TFBPUlpNUXJEcE9mbndWOVNQZXZwRmIxcks4b2RLZlpiOXUzaVJuV2JnbFpTWGMKS1oyNldWME14cTNHRkdjdE9seEZmSlhZVmpTRnk1TytaNVpXV3ZkNVhJK0ZodDBnODdEblByU2xZU2pJYUR5UgpHVkJacktvRHVMNk5ud2Ftek4rY1FEc2hCOFlic2YxVWlza2RaYkxTaHBjSjI2WW0zZStrQlFibjlKYkw1UVVDCnoxcFd4S040TnVPWlMrRnkyZk9rQTRZdHBjdTBlRXNzNTlvR3VqZWx3SEt0dFZ6dHowdkRvV2M4RzdxWXc5TFcKYmF2d1hIREhTeUZwNkxGWW9yQUZMU0NqOHdaK1hlRndJWFpsY1FJREFRQUJBb0lCQUdlaVNOVUE3TnZsNjdKRAptVE5DUnZwZjhmekxCZE9IT0MzUFB3blEzclAvaE9uejJkY01SdmN0WUY5NzV3UFRRcy8vd1d0UnJyRmJiL2NhCjFKU3dQRDAvYjM0OXJqY0xjT2FMY05zQ2JFRStzVGdmVVNOb0U2K1hyNjBUaEpJQjg1WkJERTdiMGpEaXE1VWgKTmxBNlZBQ0V5aW1BY1ZicFhQNmJFcE5WODNzcDFBUEUrc2xpUWVrMHBWK2VJcFNuWGNkMWRNbjdhcHNuYmR3MgpBbDErRDBiTkJweUNSd1dCMm81dmh0ZzIrcndaQUNOdTFQdmJGc0g5bURGUit2elJBT1oycFRDMzRwWDBhcktECnUyMGFMTU1PT2NETWN0bWp2OHJrcVJVRWt6aTNuV0ljWVVVYXFKVG1Ub2RLZlRobXhsbGx5aDg5UVAzUG8raEwKYWk0b0VJa0NnWUVBeUcvQ2xaa3g4WHF3M0NrZXJrZlZoTm83OHIxWFVFRFE3U3dMUUllaHo1Ri9iRU9WRS8yUgpJeGFZQkx2alMwRkRzY0s3TkpPeU9kTVBpU1VNcHdMSC9kNnNrWjA2QWRTVllPbUJpNUFCMUJNZXk1b0cvSmtXClpzSm42Q3g5aEJUZTVzQnRCUWQ1K1phUXU4aDBhUFcwcFh3b1h5aW1JejNpZ3Vxdk1Dc3plNU1DZ1lFQXc5TWQKY2ZmK1FhbmxqMnVpcmcxbnljMFVkN1h6ekFZRXVMUERPdHltcm1SSU9BNGl4c3JzMzRmbVE4STc4cXpDMnhmNQpEdlJPNTNzMW9XSHNzbXdxcmgzQ0RVaDF2UEVEcHVqR3dLd2E4bE1yQ2piWDhtYk1ibVNyelBuczVWeVhXaEhFCkN3VHNPV3RleUZ3OVFkZTR1K011SEYzSHB0SHFvZlRFTGZJRXBXc0NnWUVBdVBPM3dFZGVlSTlZUjYrQjZodkwKQVE1SHB4UGtUOStmYWxyci94Mm95RnBnRkV6QWNYUFh5Mkw3MzlKb1NIYnV1a2NRYTlHbDhnbTZHamtmMWJTUgpTc2VBd2RVdFE2Y2dPQThBUlFJYlRkQmU2RTAzQ1R0U0dueGxXUzVFbSs2T1NLdGpiZkthTVI4b2FyN3IvRFpOCi9TMzJLdWpkZFVPVGttNXdQYWgvbHhVQ2dZQmh3N0dNcDZJQmlGKzZaYU5YUUF3VC9OWCtHaEg0UnZ6dWRaaS8KZDArait4N3ZGV2VaVmRCQ25PZUI1cVBsT1Frak51bTU1SkRNRW9BbzdPbXQva0Nrb3VpeGx2NW84TzdBMHEvLwpteXpzMUViRmw3SGlMQjVkOHRhdXhBdllTb3lwZy9zYkFUOHFQNGVYZ2kxM0JNc095cEhIeWE0V2cvQ2ZJTU1jCnFScFd0d0tCZ0hYRjVSWUo4alpLTnE2bHI5TVZhZ
FRXdkpRZ01VbHR0UWlaM3ZrMmc0S09Kc1NWdWtEbjFpZysKQ0NKZUU2VS9OS0N3ejJSMXBjSWVET3dwek9IYzJWNkt4Z0RYZUYyVWsvMjMydlB3aXRjVExhS2hsTTlDOGNLcwp6RGlJcVFkZDRLdFhDajc4S040TlhHZ1hJdVdXOHZERFY4Q05wQm45eUlUUXFST3NRSHRrCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
kind: Secret
metadata:
name: test-k8s-secret
diff --git a/cmd/entrypoint/testutil_fake_hello_test.go b/cmd/entrypoint/testutil_fake_hello_test.go
index e203cb0a1e..76eec8897d 100644
--- a/cmd/entrypoint/testutil_fake_hello_test.go
+++ b/cmd/entrypoint/testutil_fake_hello_test.go
@@ -28,7 +28,7 @@ func TestFakeHello(t *testing.T) {
// Use RunFake() to spin up the ambassador control plane with its inputs wired up to the Fake
// APIs. This will automatically invoke the Setup() method for the Fake and also register the
// Teardown() method with the Cleanup() hook of the supplied testing.T object.
- f := entrypoint.RunFake(t, entrypoint.FakeConfig{}, nil)
+ f := entrypoint.RunFake(t, entrypoint.FakeConfig{EnvoyConfig: false}, nil)
// The Fake harness has a store for both kubernetes resources and consul endpoint data. We can
// use the UpsertFile() to method to load as many resources as we would like. This is much like
@@ -53,11 +53,24 @@ func TestFakeHello(t *testing.T) {
// computation is occurring without being overly prescriptive about the exact number of
// snapshots and/or envoy configs that are produce to achieve a certain result.
snap, err := f.GetSnapshot(func(snap *snapshot.Snapshot) bool {
- return len(snap.Kubernetes.Mappings) > 0
+ hasMappings := len(snap.Kubernetes.Mappings) > 0
+ hasSecrets := len(snap.Kubernetes.Secrets) > 0
+ hasInvalid := len(snap.Invalid) > 0
+
+ return hasMappings && hasSecrets && hasInvalid
})
require.NoError(t, err)
+
// Check that the snapshot contains the mapping from the file.
assert.Equal(t, "hello", snap.Kubernetes.Mappings[0].Name)
+
+ // This snapshot also needs to have one good secret...
+ assert.Equal(t, 1, len(snap.Kubernetes.Secrets))
+ assert.Equal(t, "tls-cert", snap.Kubernetes.Secrets[0].Name)
+
+ // ...and one invalid secret.
+ assert.Equal(t, 1, len(snap.Invalid))
+ assert.Equal(t, "tls-broken-cert", snap.Invalid[0].GetName())
}
// By default the Fake struct only invokes the first part of the pipeline that forms the control
diff --git a/cmd/entrypoint/testutil_fake_k8s_store_test.go b/cmd/entrypoint/testutil_fake_k8s_store_test.go
index 13ad546485..fad472be05 100644
--- a/cmd/entrypoint/testutil_fake_k8s_store_test.go
+++ b/cmd/entrypoint/testutil_fake_k8s_store_test.go
@@ -213,6 +213,8 @@ func canonGVK(rawString string) (canonKind string, canonGroupVersion string, err
return "Endpoints", "v1", nil
case "secret", "secrets":
return "Secret", "v1", nil
+ case "configmap", "configmaps":
+ return "ConfigMap", "v1", nil
case "ingress", "ingresses":
if strings.HasSuffix(rawVG, ".knative.dev") {
return "Ingress", "networking.internal.knative.dev/v1alpha1", nil
diff --git a/cmd/entrypoint/testutil_fake_test_test.go b/cmd/entrypoint/testutil_fake_test_test.go
index bfbf04fe55..a60e90c898 100644
--- a/cmd/entrypoint/testutil_fake_test_test.go
+++ b/cmd/entrypoint/testutil_fake_test_test.go
@@ -304,8 +304,8 @@ func TestFakeIstioCert(t *testing.T) {
},
Type: kates.SecretTypeTLS,
Data: map[string][]byte{
- "tls.key": []byte("not-real-cert"),
- "tls.crt": []byte("not-real-pem"),
+ "tls.crt": k.Secrets[0].Data["tls.crt"],
+ "tls.key": k.Secrets[0].Data["tls.key"],
},
}
diff --git a/cmd/entrypoint/watcher.go b/cmd/entrypoint/watcher.go
index 0315114273..d3f0d6ddad 100644
--- a/cmd/entrypoint/watcher.go
+++ b/cmd/entrypoint/watcher.go
@@ -407,7 +407,7 @@ func (sh *SnapshotHolder) K8sUpdate(
})
reconcileSecretsTimer.Time(func() {
- err = ReconcileSecrets(ctx, sh.k8sSnapshot)
+ err = ReconcileSecrets(ctx, sh)
})
if err != nil {
return false, err
@@ -525,7 +525,7 @@ func (sh *SnapshotHolder) IstioUpdate(ctx context.Context, istio *istioCertWatch
var err error
reconcileSecretsTimer.Time(func() {
- err = ReconcileSecrets(ctx, sh.k8sSnapshot)
+ err = ReconcileSecrets(ctx, sh)
})
if err != nil {
return false, err
diff --git a/docker/kat-client/kat_client b/docker/kat-client/kat_client
new file mode 120000
index 0000000000..020b49ae88
--- /dev/null
+++ b/docker/kat-client/kat_client
@@ -0,0 +1 @@
+/usr/local/bin/kat-client
\ No newline at end of file
diff --git a/docker/test-auth/Dockerfile b/docker/test-auth/Dockerfile
index e2463c6684..d419bcf20b 100644
--- a/docker/test-auth/Dockerfile
+++ b/docker/test-auth/Dockerfile
@@ -1,5 +1,5 @@
# The `test-auth` image gets built by `build-aux/check.mk` for use by
-# `python/kat/harness.py:TestImage`.
+# `python/tests/integration/manifests.py`.
# Copyright 2018 Datawire. All rights reserved.
#
diff --git a/docker/test-ratelimit/Dockerfile b/docker/test-ratelimit/Dockerfile
deleted file mode 100644
index 655768edab..0000000000
--- a/docker/test-ratelimit/Dockerfile
+++ /dev/null
@@ -1,33 +0,0 @@
-# The `test-ratelimit` image gets built by `build-aux/check.mk` for
-# use by `python/kat/harness.py:TestImage`.
-
-# Copyright 2018 Datawire. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License
-
-FROM node:10-alpine
-
-LABEL PROJECT_REPO_URL = "git@github.com:datawire/ambassador.git" \
- PROJECT_REPO_BROWSER_URL = "https://github.com/datawire/ambassador" \
- DESCRIPTION = "Ambassador REST Service" \
- VENDOR = "Datawire" \
- VENDOR_URL = "https://datawire.io/"
-
-ENV NODE_ENV production
-
-WORKDIR /app
-
-ADD . .
-RUN npm install
-
-CMD ["npm", "start"]
diff --git a/docker/test-ratelimit/package.json b/docker/test-ratelimit/package.json
deleted file mode 100644
index 22d07523ab..0000000000
--- a/docker/test-ratelimit/package.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
- "name": "ratelimit-service",
- "version": "0.0.3",
- "description": "Dummy rate limiter implementation for Ambassador API Gateway end-to-end tests",
- "main": "server.js",
- "engines": {
- "node": ">=8.9.4",
- "npm": ">=5.6.0"
- },
- "scripts": {
- "start": "node server.js"
- },
- "dependencies": {
- "grpc": "^1.10.1"
- }
-}
diff --git a/docker/test-ratelimit/ratelimit.crt b/docker/test-ratelimit/ratelimit.crt
deleted file mode 100644
index 323b96090d..0000000000
--- a/docker/test-ratelimit/ratelimit.crt
+++ /dev/null
@@ -1,23 +0,0 @@
------BEGIN CERTIFICATE-----
-MIID2jCCAsKgAwIBAgIRAKlRg3DeRR97bt/PNtG2qw0wDQYJKoZIhvcNAQELBQAw
-ezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAk1BMQ8wDQYDVQQHEwZCb3N0b24xGDAW
-BgNVBAoTD0FtYmFzc2Fkb3IgTGFiczEUMBIGA1UECxMLRW5naW5lZXJpbmcxHjAc
-BgNVBAMTFXJhdGVsaW1pdC5kYXRhd2lyZS5pbzAgFw0yMTExMTAxMzEyMDBaGA8y
-MDk5MTExMDEzMTIwMFowezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAk1BMQ8wDQYD
-VQQHEwZCb3N0b24xGDAWBgNVBAoTD0FtYmFzc2Fkb3IgTGFiczEUMBIGA1UECxML
-RW5naW5lZXJpbmcxHjAcBgNVBAMTFXJhdGVsaW1pdC5kYXRhd2lyZS5pbzCCASIw
-DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALypYKoQpEKbSyG3rVM2ddDt3XqD
-ra6nfBmZ8YqW03dLE0XY8hED2KPruX281SKSU5vgGmA0IRUfelxFxS2Rznrk3CGb
-lsBBka8GEXQF6TRtcHb1CQHZqeylPBAeuXaMXrwR8fcKXspu+9BHzjkd7w9Fbp7F
-6cubtPMGSPzpxhF7FJ+SEuWEzKSonWKa93rk4+ytIcuVZeWmdirZbpuP6Bel05Cu
-i9Vs6Qia68AQ5tQvsKQoWUkFSJANeY7WMzqEgt+BG0hKt658otVOAJdyFPEA96j7
-6CFjS9VXxcD18BruPWdil/6gprQhc/XVRU4cUrOOqPmoKhtmDekLY6Cka9MCAwEA
-AaNXMFUwDgYDVR0PAQH/BAQDAgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwGA1Ud
-EwEB/wQCMAAwIAYDVR0RBBkwF4IVcmF0ZWxpbWl0LmRhdGF3aXJlLmlvMA0GCSqG
-SIb3DQEBCwUAA4IBAQCIZJnPY9NkOY0w8GaVDHrkHIREU8r/B2cv3dNn0k9ktvlC
-0rgUvg7ZDzsr7h+uWI5VgxED1KpnQbDcHMQL2Wk0+z5xJgP+wj1ueSCniJGeOUEH
-zWZQ4rfs8jUFkBT+Is12YX+YEOGYP71+EzmbGK3glRfbI+NrtJuv+vpiZKcQzHfq
-V3IpUKpEJ0o4XVUuBKtnVXcWrR+KlJQCY2vC5eSMstjgC5YKVBRiVqbyIGA/ThDq
-BpKO3eeUmF2SWhIzCCgLq49iTaBpSzw7mFZdQsOTyXQVVppOmcjqTiF3j8FaVTE5
-WWblE/fD+ZXIPEMxs9te3T9/DIKDM8AyxoJ1Jh7K
------END CERTIFICATE-----
diff --git a/docker/test-ratelimit/ratelimit.key b/docker/test-ratelimit/ratelimit.key
deleted file mode 100644
index 7e2cb1761e..0000000000
--- a/docker/test-ratelimit/ratelimit.key
+++ /dev/null
@@ -1,28 +0,0 @@
------BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC8qWCqEKRCm0sh
-t61TNnXQ7d16g62up3wZmfGKltN3SxNF2PIRA9ij67l9vNUiklOb4BpgNCEVH3pc
-RcUtkc565Nwhm5bAQZGvBhF0Bek0bXB29QkB2anspTwQHrl2jF68EfH3Cl7KbvvQ
-R845He8PRW6exenLm7TzBkj86cYRexSfkhLlhMykqJ1imvd65OPsrSHLlWXlpnYq
-2W6bj+gXpdOQrovVbOkImuvAEObUL7CkKFlJBUiQDXmO1jM6hILfgRtISreufKLV
-TgCXchTxAPeo++ghY0vVV8XA9fAa7j1nYpf+oKa0IXP11UVOHFKzjqj5qCobZg3p
-C2OgpGvTAgMBAAECggEBAJ+xd6M8pu3CaZxGz63qIVwSnDDCGVgHaSJ6jlxTQvht
-UgkDlBMXAF/wfniSSI8U8TS2Q10/gulQVdCZNkhWbULVSgggnUBrwBc4ublN75Jz
-OIlY7KDmT9GCJA85Ep/oPaBQSFJmMsqDmx84SLVMQzjX+sTmnfm8+TPlFA8RCplr
-83S8PqVVGGh0RdMyFh0Tsv5wqWodxv09vmUgeq42S76NsSg8A6HrVP15EAZcEHiM
-zOSUYqh58EEDXHOscMy/8bUGY07EHXZmvsTCMDd3Dsx/7QJ4o1AiS0y5JXaMxDyB
-JSUyQv6zIwzNNyJ7lwqZ9J56B1xcNJoDAw3gCz9/KXECgYEA338WkkQgJrROXepz
-508IP7W1p8jp4/1U+MWRjpriwdhuAXnzB4odPL1SYfFPHfgG8RYBMNQAJDkeIoSt
-xrAJhHkFlJOYzCNos3Aeo8sjqzchWZRnjuJSodRhSShWtJJmuufR55bhYPAr/12h
-nwpwS0HWdGvK3v5wPtCksCmfwUcCgYEA2BleKiIQQSBoW853vemHrIVKTaGULeU7
-degM2lnZBuGKbSxaFxvipj64wORrFm54JxOnCXXWuSJM8tRmoQPA10M3SmUHN/Fu
-v529QMm5sjB17EfQeIymhuFxHm2xvk9mul9WnaozBX29OrVMlwQoqyMzZu1fsXog
-AsIb6wUKZxUCgYBDrtIgE3+FGR+Oc30MNLPzz0ym9kJWqBZ+jB5riF5Zg/i0e8Ds
-rJf0GAWF4bUrBzza7+YGan1setu0amfR/uey9Y+KEjS4xZRkmvS8d71ikXyJC1dd
-Pw71MUMRC6VOY/O2cJPxxZCVccZxPGLArkGZmOOgODCk3XsSms71BnX56wKBgHN+
-S1dVUT6dZWj7lf+H2h0YN7f5zUoiI39Gf+gK8PS+gc8LTzLekmmrR+6/pYQdklXA
-KRvjQNk9PcbiQd94NA5YPCqkiBEcFcaPNWB076wOPlgDoaVr9mxL/Lr4gXBm205s
-OcyV9CLRKts/nilv7ZRZgdVWtDgUPxt6RpV64i5pAoGAGGIwDJAK8ctCtwnbRhJy
-xVHwVcdVjFAEZj0Ytw34FCV6OpJfztvkThA7uqg9yNMwHBdx1vge6Xzxy4rniEtE
-DyFlHyWTrFwoDBYy4d7cNgDJnaIN2qBY36GDyL2x7/DyKd5+CQN07XWuOmnGrzIo
-6ABc+KN1kmXbr9VteFRagAI=
------END PRIVATE KEY-----
diff --git a/docker/test-ratelimit/ratelimit.proto b/docker/test-ratelimit/ratelimit.proto
deleted file mode 100644
index 85c75c1d30..0000000000
--- a/docker/test-ratelimit/ratelimit.proto
+++ /dev/null
@@ -1,104 +0,0 @@
-// Downloaded from https://raw.githubusercontent.com/envoyproxy/ratelimit/v1.3.0/proto/ratelimit/ratelimit.proto
-
-syntax = "proto3";
-
-option go_package = "ratelimit";
-
-option cc_generic_services = true;
-
-package pb.lyft.ratelimit;
-
-service RateLimitService {
- // Determine whether rate limiting should take place.
- rpc ShouldRateLimit (RateLimitRequest) returns (RateLimitResponse) {}
-}
-
-// Main message for a rate limit request. The rate limit service is designed to be fully generic
-// in the sense that it can operate on arbitrary hierarchical key/value pairs. The loaded
-// configuration will parse the request and find the most specific limit to apply. In addition,
-// a RateLimitRequest can contain multiple "descriptors" to limit on. When multiple descriptors
-// are provided, the server will limit on *ALL* of them and return an OVER_LIMIT response if any
-// of them are over limit. This enables more complex application level rate limiting scenarios
-// if desired.
-message RateLimitRequest {
- // All rate limit requests must specify a domain. This enables the configuration to be per
- // application without fear of overlap. E.g., "envoy".
- string domain = 1;
- // All rate limit requests must specify at least one RateLimitDescriptor. Each descriptor is
- // processed by the service (see below). If any of the descriptors are over limit, the entire
- // request is considered to be over limit.
- repeated RateLimitDescriptor descriptors = 2;
- // Rate limit requests can optionally specify the number of hits a request adds to the matched limit. If the
- // value is not set in the message, a request increases the matched limit by 1.
- uint32 hits_addend = 3;
-}
-
-// A RateLimitDescriptor is a list of hierarchical entries that are used by the service to
-// determine the final rate limit key and overall allowed limit. Here are some examples of how
-// they might be used for the domain "envoy".
-// 1) ["authenticated": "false"], ["ip_address": "10.0.0.1"]
-// What it does: Limits all unauthenticated traffic for the IP address 10.0.0.1. The
-// configuration supplies a default limit for the ip_address field. If there is a desire to raise
-// the limit for 10.0.0.1 or block it entirely it can be specified directly in the
-// configuration.
-// 2) ["authenticated": "false"], ["path": "/foo/bar"]
-// What it does: Limits all unauthenticated traffic globally for a specific path (or prefix if
-// configured that way in the service).
-// 3) ["authenticated": "false"], ["path": "/foo/bar"], ["ip_address": "10.0.0.1"]
-// What it does: Limits unauthenticated traffic to a specific path for a specific IP address.
-// Like (1) we can raise/block specific IP addresses if we want with an override configuration.
-// 4) ["authenticated": "true"], ["client_id": "foo"]
-// What it does: Limits all traffic for an authenticated client "foo"
-// 5) ["authenticated": "true"], ["client_id": "foo"], ["path": "/foo/bar"]
-// What it does: Limits traffic to a specific path for an authenticated client "foo"
-//
-// The idea behind the API is that (1)/(2)/(3) and (4)/(5) can be sent in 1 request if desired.
-// This enables building complex application scenarios with a generic backend.
-message RateLimitDescriptor {
- message Entry {
- string key = 1;
- string value = 2;
- }
-
- repeated Entry entries = 1;
-}
-
-// Defines an actual rate limit in terms of requests per unit of time and the unit itself.
-message RateLimit {
- enum Unit {
- UNKNOWN = 0;
- SECOND = 1;
- MINUTE = 2;
- HOUR = 3;
- DAY = 4;
- }
-
- uint32 requests_per_unit = 1;
- Unit unit = 2;
-}
-
-// A response from a ShouldRateLimit call.
-message RateLimitResponse {
- enum Code {
- UNKNOWN = 0;
- OK = 1;
- OVER_LIMIT = 2;
- }
-
- message DescriptorStatus {
- // The response code for an individual descriptor.
- Code code = 1;
- // The current limit as configured by the server. Useful for debugging, etc.
- RateLimit current_limit = 2;
- // The limit remaining in the current time unit.
- uint32 limit_remaining = 3;
- }
-
- // The overall response code which takes into account all of the descriptors that were passed
- // in the RateLimitRequest message.
- Code overall_code = 1;
- // A list of DescriptorStatus messages which matches the length of the descriptor list passed
- // in the RateLimitRequest. This can be used by the caller to determine which individual
- // descriptors failed and/or what the currently configured limits are for all of them.
- repeated DescriptorStatus statuses = 2;
-}
\ No newline at end of file
diff --git a/docker/test-ratelimit/server.js b/docker/test-ratelimit/server.js
deleted file mode 100644
index 99f5f2e233..0000000000
--- a/docker/test-ratelimit/server.js
+++ /dev/null
@@ -1,72 +0,0 @@
-'use strict';
-
-const path = require('path');
-
-const GRPC_PORT = process.env.GRPC_PORT || '5000';
-const USE_TLS = process.env.USE_TLS || false;
-
-const fs = require('fs');
-const SSL_CERT_PATH = path.normalize(__dirname + '/ratelimit.crt');
-const SSL_KEY_PATH = path.normalize(__dirname + '/ratelimit.key');
-
-const grpc = require('grpc');
-const grpcserver = new grpc.Server();
-
-const PROTO_PATH = path.normalize(__dirname + '/ratelimit.proto');
-const ratelimitProto = grpc.load(PROTO_PATH).pb.lyft.ratelimit;
-
-grpcserver.addService(ratelimitProto.RateLimitService.service, {
- shouldRateLimit: (call, callback) => {
- let allow = false;
- const rateLimitResponse = new ratelimitProto.RateLimitResponse();
-
- console.log("========>");
- console.log(call.request.domain);
- call.request.descriptors.forEach((descriptor) => {
- descriptor.entries.forEach((entry) => {
- console.log(` ${entry.key} = ${entry.value}`);
-
- if (entry.key === 'x-ambassador-test-allow' && entry.value === 'true') {
- allow = true;
- }
- });
-
- const descriptorStatus = new ratelimitProto.RateLimitResponse.DescriptorStatus();
- const rateLimit = new ratelimitProto.RateLimit();
- rateLimit.requests_per_unit = 1000;
- rateLimit.unit = ratelimitProto.RateLimit.Unit.SECOND;
- descriptorStatus.code = ratelimitProto.RateLimitResponse.Code.OK;
- descriptorStatus.current_limit = rateLimit;
- descriptorStatus.limit_remaining = Number.MAX_VALUE;
- rateLimitResponse.statuses.push(descriptorStatus);
- });
- if (allow) {
- rateLimitResponse.overall_code = ratelimitProto.RateLimitResponse.Code.OK;
- } else {
- rateLimitResponse.overall_code = ratelimitProto.RateLimitResponse.Code.OVER_LIMIT;
- }
-
- console.log("<========");
- console.log(rateLimitResponse);
- return callback(null, rateLimitResponse);
- }
-});
-
-if (USE_TLS === "true") {
- const cert = fs.readFileSync(SSL_CERT_PATH);
- const key = fs.readFileSync(SSL_KEY_PATH);
- const kvpair = {
- 'private_key': key,
- 'cert_chain': cert
- };
-
- console.log(`TLS enabled, loading cert from ${SSL_CERT_PATH} and key from ${SSL_KEY_PATH}`);
- var serverCredentials = grpc.ServerCredentials.createSsl(null, [kvpair]);
-} else {
- console.log(`TLS disabled, creating insecure credentials`);
- var serverCredentials = grpc.ServerCredentials.createInsecure();
-}
-
-grpcserver.bind(`0.0.0.0:${GRPC_PORT}`, serverCredentials);
-grpcserver.start();
-console.log(`Listening on GRPC port ${GRPC_PORT}, TLS: ${USE_TLS}`);
diff --git a/docker/test-shadow/Dockerfile b/docker/test-shadow/Dockerfile
index c60dcd6ce3..04f139ed66 100644
--- a/docker/test-shadow/Dockerfile
+++ b/docker/test-shadow/Dockerfile
@@ -1,5 +1,5 @@
# The `test-shadow` image gets built by `build-aux/check.mk` for use
-# by `python/kat/harness.py:TestImage`.
+# by `python/tests/integration/manifests.py`.
# Copyright 2018 Datawire. All rights reserved.
#
diff --git a/docker/test-stats/Dockerfile b/docker/test-stats/Dockerfile
index 8403d96b75..ed69acbf35 100644
--- a/docker/test-stats/Dockerfile
+++ b/docker/test-stats/Dockerfile
@@ -1,5 +1,5 @@
# The `test-stats` image gets built by `build-aux/check.mk` for use by
-# `python/kat/harness.py:TestImage`.
+# `python/tests/integration/manifests.py`.
# Copyright 2018 Datawire. All rights reserved.
#
diff --git a/docs/releaseNotes.yml b/docs/releaseNotes.yml
index 3e706578b4..bf0c0cb332 100644
--- a/docs/releaseNotes.yml
+++ b/docs/releaseNotes.yml
@@ -34,6 +34,13 @@ items:
- version: 2.2.0
date: 'TBD'
notes:
+ - title: Envoy V2 API no longer supported
+ type: change
+ body: >-
+ Support for the Envoy V2 API and the `AMBASSADOR_ENVOY_API_VERSION` environment
+ variable have been removed. Only the Envoy V3 API is supported (this has been the
+ default since Emissary-ingress v1.14.0).
+
- title: Support a log-level metric
type: feature
body: >-
@@ -47,12 +54,24 @@ items:
link: https://github.com/emissary-ingress/emissary/issues/3906
docs: https://www.getambassador.io/docs/edge-stack/latest/topics/running/statistics/8877-metrics/
- - title: Envoy V2 API no longer supported
+ - title: Validate certificates in TLS Secrets
+ type: bugfix
+ body: >-
+ Kubernetes Secrets that should contain TLS certificates are now validated before being
+ accepted for configuration. A Secret that contains an invalid TLS certificate will be logged
+ as an invalid resource.
+ github:
+ - title: 3821
+ link: https://github.com/emissary-ingress/emissary/issues/3821
+ docs: https://github.com/emissary-ingress/emissary/issues/3821
+
+  - title: Emissary watches for Cloud Connect Tokens
type: change
body: >-
- Support for the Envoy V2 API and the `AMBASSADOR_ENVOY_API_VERSION` environment
- variable have been removed. Only the Envoy V3 API is supported (this has been the
- default since Emissary-ingress v1.14.0).
+ Emissary will now watch for ConfigMap or Secret resources specified by the
+ `AGENT_CONFIG_RESOURCE_NAME` environment variable in order to allow all
+ components (and not only the Ambassador Agent) to authenticate requests to
+ Ambassador Cloud.
- title: Support received commands to pause, continue and abort a Rollout via Agent directives
type: feature
diff --git a/k8s-config/create_yaml.py b/k8s-config/create_yaml.py
deleted file mode 100755
index bd494930a6..0000000000
--- a/k8s-config/create_yaml.py
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/usr/bin/env python
-
-# This script is to help generate any flat yaml files from the ambassador helm chart.
-#
-# This script takes two arguments:
-# 1. A multi-doc yaml file generated from running:
-# `helm template ambassador -f [VALUES_FILE.yaml] -n [NAMESPACE] ./charts/emissary-ingress`
-# 2. A yaml file listing the required kubernetes resources from the generated helm template to
-# output to stdout. See ../aes/require.yaml for an example
-#
-# This script will output to stdout the resources from 1) iff they are referenced in 2). It will
-# preserve the ordering from 2), and will error if any resources named in 2) are missing in 1)
-import sys
-import ruamel.yaml
-
-
-NO_NAMESPACE = '__no_namespace'
-
-
-def get_resource_key(resource):
- metadata = resource.get('metadata', {})
- namespace = metadata['namespace'] if 'namespace' in metadata else NO_NAMESPACE
-
- return '{}.{}.{}'.format(resource['kind'], metadata['name'], namespace)
-
-
-def get_requirement_key(req):
- if 'kind' not in req or 'name' not in req:
- raise Exception('Malformed requirement %s' % req)
- ns = req['namespace'] if 'namespace' in req else NO_NAMESPACE
- return '{}.{}.{}'.format(req['kind'], req['name'], ns)
-
-
-def main(templated_helm_file, require_file):
- yaml = ruamel.yaml.YAML()
- yaml.indent(mapping=2)
- with open(templated_helm_file, 'r') as f:
- templated_helm = {}
- for yaml_doc in yaml.load_all(f.read()):
- if yaml_doc is None:
- continue
- templated_helm[get_resource_key(yaml_doc)] = yaml_doc
- with open(require_file, 'r') as f:
- requirements = yaml.load(f.read())
-
- print('# GENERATED FILE: edits made by hand will not be preserved.')
- # Print out required resources in the order they appear in require_file. Order actually matters
- # here, for example, we need the namespace show up before any namespaced resources.
- for requirement in requirements.get('resources'):
- print('---')
- key = get_requirement_key(requirement)
- if key not in templated_helm:
- raise Exception(f'Resource {key} not found in generated yaml (known resources are: {templated_helm.keys()})')
- yaml.dump(templated_helm[key], sys.stdout)
-
-
-if __name__ == '__main__':
- if len(sys.argv) != 3:
- print('USAGE: create_yaml.py [HELM_GENERATED_FILE] [REQUIREMENTS_FILE]')
- sys.exit(1)
- templated_helm = sys.argv[1]
- require_file = sys.argv[2]
-
- main(templated_helm, require_file)
diff --git a/k8s-config/kat-ambassador/values.yaml b/k8s-config/kat-ambassador/values.yaml
index c912049db1..51a68985ea 100644
--- a/k8s-config/kat-ambassador/values.yaml
+++ b/k8s-config/kat-ambassador/values.yaml
@@ -11,7 +11,7 @@ service:
barePod: true
image:
- fullImageOverride: «image»
+ fullImageOverride: «images[emissary]»
containerNameOverride: ambassador
restartPolicy: Always
env:
diff --git a/manifests/emissary/emissary-defaultns.yaml.in b/manifests/emissary/emissary-defaultns.yaml.in
index 8d151db97c..4fc39605c7 100644
--- a/manifests/emissary/emissary-defaultns.yaml.in
+++ b/manifests/emissary/emissary-defaultns.yaml.in
@@ -1,72 +1,63 @@
# GENERATED FILE: edits made by hand will not be preserved.
---
-# Source: emissary-ingress/templates/admin-service.yaml
apiVersion: v1
kind: Service
metadata:
- name: emissary-ingress-admin
- namespace: default
- labels:
- app.kubernetes.io/name: emissary-ingress
-
- app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
- app.kubernetes.io/managed-by: getambassador.io
- # Hard-coded label for Prometheus Operator ServiceMonitor
- service: ambassador-admin
- product: aes
annotations:
+ a8r.io/bugs: https://github.com/datawire/ambassador/issues
+ a8r.io/chat: http://a8r.io/Slack
+ a8r.io/dependencies: None
+ a8r.io/description: The Ambassador Edge Stack admin service for internal use and health checks.
+ a8r.io/documentation: https://www.getambassador.io/docs/edge-stack/latest/
a8r.io/owner: Ambassador Labs
a8r.io/repository: github.com/datawire/ambassador
- a8r.io/description: The Ambassador Edge Stack admin service for internal use and
- health checks.
- a8r.io/documentation: https://www.getambassador.io/docs/edge-stack/latest/
- a8r.io/chat: http://a8r.io/Slack
- a8r.io/bugs: https://github.com/datawire/ambassador/issues
a8r.io/support: https://www.getambassador.io/about-us/support/
- a8r.io/dependencies: None
+ labels:
+ app.kubernetes.io/instance: emissary-ingress
+ app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress
+ app.kubernetes.io/part-of: emissary-ingress
+ product: aes
+ service: ambassador-admin
+ name: emissary-ingress-admin
+ namespace: default
spec:
- type: NodePort
ports:
- - port: 8877
+ - name: ambassador-admin
+ port: 8877
+ protocol: TCP
targetPort: admin
+ - name: ambassador-snapshot
+ port: 8005
protocol: TCP
- name: ambassador-admin
- - port: 8005
targetPort: 8005
- protocol: TCP
- name: ambassador-snapshot
selector:
- app.kubernetes.io/name: emissary-ingress
app.kubernetes.io/instance: emissary-ingress
+ app.kubernetes.io/name: emissary-ingress
+ type: NodePort
---
-# Source: emissary-ingress/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
- name: emissary-ingress
- namespace: default
- labels:
- app.kubernetes.io/name: emissary-ingress
-
- app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
- app.kubernetes.io/managed-by: getambassador.io
- app.kubernetes.io/component: ambassador-service
- product: aes
annotations:
+ a8r.io/bugs: https://github.com/datawire/ambassador/issues
+ a8r.io/chat: http://a8r.io/Slack
+ a8r.io/dependencies: emissary-ingress-redis.default
+ a8r.io/description: The Ambassador Edge Stack goes beyond traditional API Gateways and Ingress Controllers with the advanced edge features needed to support developer self-service and full-cycle development.
+ a8r.io/documentation: https://www.getambassador.io/docs/edge-stack/latest/
a8r.io/owner: Ambassador Labs
a8r.io/repository: github.com/datawire/ambassador
- a8r.io/description: The Ambassador Edge Stack goes beyond traditional API Gateways
- and Ingress Controllers with the advanced edge features needed to support developer
- self-service and full-cycle development.
- a8r.io/documentation: https://www.getambassador.io/docs/edge-stack/latest/
- a8r.io/chat: http://a8r.io/Slack
- a8r.io/bugs: https://github.com/datawire/ambassador/issues
a8r.io/support: https://www.getambassador.io/about-us/support/
- a8r.io/dependencies: emissary-ingress-redis.default
+ labels:
+ app.kubernetes.io/component: ambassador-service
+ app.kubernetes.io/instance: emissary-ingress
+ app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress
+ app.kubernetes.io/part-of: emissary-ingress
+ product: aes
+ name: emissary-ingress
+ namespace: default
spec:
- type: LoadBalancer
ports:
- name: http
port: 80
@@ -75,208 +66,208 @@ spec:
port: 443
targetPort: 8443
selector:
- app.kubernetes.io/name: emissary-ingress
app.kubernetes.io/instance: emissary-ingress
+ app.kubernetes.io/name: emissary-ingress
profile: main
+ type: LoadBalancer
---
-# Source: emissary-ingress/templates/rbac.yaml
-######################################################################
-# Aggregate #
-######################################################################
-# This ClusterRole has an empty `rules` and instead sets
-# `aggregationRule` in order to aggregate several other ClusterRoles
-# together, to avoid the need for multiple ClusterRoleBindings.
+aggregationRule:
+ clusterRoleSelectors:
+ - matchLabels:
+ rbac.getambassador.io/role-group: emissary-ingress
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: emissary-ingress
labels:
- app.kubernetes.io/name: emissary-ingress
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
-aggregationRule:
- clusterRoleSelectors:
- - matchLabels:
- rbac.getambassador.io/role-group: emissary-ingress
+ name: emissary-ingress
rules: []
---
-# Source: emissary-ingress/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
- name: emissary-ingress
- namespace: default
labels:
- app.kubernetes.io/name: emissary-ingress
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
+ name: emissary-ingress
+ namespace: default
---
-# Source: emissary-ingress/templates/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
- name: emissary-ingress
labels:
- app.kubernetes.io/name: emissary-ingress
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
+ name: emissary-ingress
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: emissary-ingress
subjects:
-- name: emissary-ingress
+- kind: ServiceAccount
+ name: emissary-ingress
namespace: default
- kind: ServiceAccount
---
-# Source: emissary-ingress/templates/rbac.yaml
-######################################################################
-# No namespace #
-######################################################################
-# These ClusterRoles should be limited to resource types that are
-# non-namespaced, and therefore cannot be put in a Role, even if
-# Emissary is in single-namespace mode.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: emissary-ingress-crd
labels:
- app.kubernetes.io/name: emissary-ingress
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
rbac.getambassador.io/role-group: emissary-ingress
+ name: emissary-ingress-crd
rules:
-- apiGroups: [apiextensions.k8s.io]
- resources: [customresourcedefinitions]
- verbs: [get, list, watch, delete]
+- apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - get
+ - list
+ - watch
+ - delete
---
-# Source: emissary-ingress/templates/rbac.yaml
-######################################################################
-# All namespaces #
-######################################################################
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: emissary-ingress-watch
labels:
- app.kubernetes.io/name: emissary-ingress
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
rbac.getambassador.io/role-group: emissary-ingress
+ name: emissary-ingress-watch
rules:
-- apiGroups: ['']
+- apiGroups:
+ - ""
resources:
- namespaces
- services
- secrets
+ - configmaps
- endpoints
- verbs: [get, list, watch]
-
-- apiGroups: [getambassador.io]
- resources: ['*']
- verbs: [get, list, watch, update, patch, create, delete]
-
-- apiGroups: [getambassador.io]
- resources: [mappings/status]
- verbs: [update]
-
-- apiGroups: [networking.internal.knative.dev]
- resources: [clusteringresses, ingresses]
- verbs: [get, list, watch]
-
-- apiGroups: [networking.x-k8s.io]
- resources: ['*']
- verbs: [get, list, watch]
-
-- apiGroups: [networking.internal.knative.dev]
- resources: [ingresses/status, clusteringresses/status]
- verbs: [update]
-
-- apiGroups: [extensions, networking.k8s.io]
- resources: [ingresses, ingressclasses]
- verbs: [get, list, watch]
-
-- apiGroups: [extensions, networking.k8s.io]
- resources: [ingresses/status]
- verbs: [update]
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - getambassador.io
+ resources:
+ - '*'
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - create
+ - delete
+- apiGroups:
+ - getambassador.io
+ resources:
+ - mappings/status
+ verbs:
+ - update
+- apiGroups:
+ - networking.internal.knative.dev
+ resources:
+ - clusteringresses
+ - ingresses
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - networking.x-k8s.io
+ resources:
+ - '*'
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - networking.internal.knative.dev
+ resources:
+ - ingresses/status
+ - clusteringresses/status
+ verbs:
+ - update
+- apiGroups:
+ - extensions
+ - networking.k8s.io
+ resources:
+ - ingresses
+ - ingressclasses
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - extensions
+ - networking.k8s.io
+ resources:
+ - ingresses/status
+ verbs:
+ - update
---
-# Source: emissary-ingress/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
- name: emissary-ingress
- namespace: default
labels:
- app.kubernetes.io/name: emissary-ingress
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
+ name: emissary-ingress
+ namespace: default
spec:
+ progressDeadlineSeconds: 600
replicas: 3
selector:
matchLabels:
- app.kubernetes.io/name: emissary-ingress
app.kubernetes.io/instance: emissary-ingress
+ app.kubernetes.io/name: emissary-ingress
strategy:
type: RollingUpdate
-
-
- progressDeadlineSeconds: 600
template:
metadata:
+ annotations:
+ consul.hashicorp.com/connect-inject: "false"
+ sidecar.istio.io/inject: "false"
labels:
- app.kubernetes.io/name: emissary-ingress
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
profile: main
- annotations:
- consul.hashicorp.com/connect-inject: 'false'
- sidecar.istio.io/inject: 'false'
spec:
- terminationGracePeriodSeconds: 0
- securityContext:
- runAsUser: 8888
- restartPolicy: Always
- serviceAccountName: emissary-ingress
- volumes:
- - name: ambassador-pod-info
- downwardAPI:
- items:
- - fieldRef:
- fieldPath: metadata.labels
- path: labels
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - podAffinityTerm:
+ labelSelector:
+ matchLabels:
+ service: ambassador
+ topologyKey: kubernetes.io/hostname
+ weight: 100
containers:
- - name: ambassador
- image: $imageRepo$:$version$
- imagePullPolicy: IfNotPresent
- ports:
- - name: http
- containerPort: 8080
- - name: https
- containerPort: 8443
- - name: admin
- containerPort: 8877
- env:
+ - env:
- name: HOST_IP
valueFrom:
fieldRef:
@@ -285,26 +276,30 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- securityContext:
- allowPrivilegeEscalation: false
+ image: $imageRepo$:$version$
+ imagePullPolicy: IfNotPresent
livenessProbe:
+ failureThreshold: 3
httpGet:
path: /ambassador/v0/check_alive
port: admin
- failureThreshold: 3
initialDelaySeconds: 30
periodSeconds: 3
+ name: ambassador
+ ports:
+ - containerPort: 8080
+ name: http
+ - containerPort: 8443
+ name: https
+ - containerPort: 8877
+ name: admin
readinessProbe:
+ failureThreshold: 3
httpGet:
path: /ambassador/v0/check_ready
port: admin
- failureThreshold: 3
initialDelaySeconds: 30
periodSeconds: 3
- volumeMounts:
- - name: ambassador-pod-info
- mountPath: /tmp/ambassador-pod-info
- readOnly: true
resources:
limits:
cpu: 1
@@ -312,45 +307,50 @@ spec:
requests:
cpu: 200m
memory: 100Mi
- affinity:
- podAntiAffinity:
- preferredDuringSchedulingIgnoredDuringExecution:
- - podAffinityTerm:
- labelSelector:
- matchLabels:
- service: ambassador
- topologyKey: kubernetes.io/hostname
- weight: 100
- imagePullSecrets: []
+ securityContext:
+ allowPrivilegeEscalation: false
+ volumeMounts:
+ - mountPath: /tmp/ambassador-pod-info
+ name: ambassador-pod-info
+ readOnly: true
dnsPolicy: ClusterFirst
hostNetwork: false
+ imagePullSecrets: []
+ restartPolicy: Always
+ securityContext:
+ runAsUser: 8888
+ serviceAccountName: emissary-ingress
+ terminationGracePeriodSeconds: 0
+ volumes:
+ - downwardAPI:
+ items:
+ - fieldRef:
+ fieldPath: metadata.labels
+ path: labels
+ name: ambassador-pod-info
---
-# Source: emissary-ingress/templates/ambassador-agent.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
- name: emissary-ingress-agent
- namespace: default
labels:
- app.kubernetes.io/name: emissary-ingress-agent
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress-agent
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
+ name: emissary-ingress-agent
+ namespace: default
---
-# Source: emissary-ingress/templates/ambassador-agent.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
- name: emissary-ingress-agent
labels:
- app.kubernetes.io/name: emissary-ingress-agent
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress-agent
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
+ name: emissary-ingress-agent
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -360,163 +360,182 @@ subjects:
name: emissary-ingress-agent
namespace: default
---
-# Source: emissary-ingress/templates/ambassador-agent.yaml
+aggregationRule:
+ clusterRoleSelectors:
+ - matchLabels:
+ rbac.getambassador.io/role-group: emissary-ingress-agent
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: emissary-ingress-agent
labels:
- app.kubernetes.io/name: emissary-ingress-agent
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress-agent
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
-aggregationRule:
- clusterRoleSelectors:
- - matchLabels:
- rbac.getambassador.io/role-group: emissary-ingress-agent
+ name: emissary-ingress-agent
rules: []
---
-# Source: emissary-ingress/templates/ambassador-agent.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: emissary-ingress-agent-pods
labels:
- rbac.getambassador.io/role-group: emissary-ingress-agent
- app.kubernetes.io/name: emissary-ingress-agent
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress-agent
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
+ rbac.getambassador.io/role-group: emissary-ingress-agent
+ name: emissary-ingress-agent-pods
rules:
-- apiGroups: ['']
- resources: [pods]
- verbs: [get, list, watch]
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
---
-# Source: emissary-ingress/templates/ambassador-agent.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: emissary-ingress-agent-rollouts
labels:
- rbac.getambassador.io/role-group: emissary-ingress-agent
- app.kubernetes.io/name: emissary-ingress-agent
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress-agent
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
+ rbac.getambassador.io/role-group: emissary-ingress-agent
+ name: emissary-ingress-agent-rollouts
rules:
-- apiGroups: [argoproj.io]
- resources: [rollouts]
- verbs: [get, list, watch, patch]
+- apiGroups:
+ - argoproj.io
+ resources:
+ - rollouts
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
---
-# Source: emissary-ingress/templates/ambassador-agent.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: emissary-ingress-agent-applications
labels:
- rbac.getambassador.io/role-group: emissary-ingress-agent
- app.kubernetes.io/name: emissary-ingress-agent
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress-agent
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
+ rbac.getambassador.io/role-group: emissary-ingress-agent
+ name: emissary-ingress-agent-applications
rules:
-- apiGroups: [argoproj.io]
- resources: [applications]
- verbs: [get, list, watch]
+- apiGroups:
+ - argoproj.io
+ resources:
+ - applications
+ verbs:
+ - get
+ - list
+ - watch
---
-# Source: emissary-ingress/templates/ambassador-agent.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: emissary-ingress-agent-deployments
labels:
- rbac.getambassador.io/role-group: emissary-ingress-agent
- app.kubernetes.io/name: emissary-ingress-agent
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress-agent
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
+ rbac.getambassador.io/role-group: emissary-ingress-agent
+ name: emissary-ingress-agent-deployments
rules:
-- apiGroups: [apps, extensions]
- resources: [deployments]
- verbs: [get, list, watch]
+- apiGroups:
+ - apps
+ - extensions
+ resources:
+ - deployments
+ verbs:
+ - get
+ - list
+ - watch
---
-# Source: emissary-ingress/templates/ambassador-agent.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: emissary-ingress-agent-endpoints
labels:
- rbac.getambassador.io/role-group: emissary-ingress-agent
- app.kubernetes.io/name: emissary-ingress-agent
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress-agent
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
+ rbac.getambassador.io/role-group: emissary-ingress-agent
+ name: emissary-ingress-agent-endpoints
rules:
-- apiGroups: ['']
- resources: [endpoints]
- verbs: [get, list, watch]
+- apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - get
+ - list
+ - watch
---
-# Source: emissary-ingress/templates/ambassador-agent.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: emissary-ingress-agent-configmaps
labels:
- rbac.getambassador.io/role-group: emissary-ingress-agent
- app.kubernetes.io/name: emissary-ingress-agent
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress-agent
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
+ rbac.getambassador.io/role-group: emissary-ingress-agent
+ name: emissary-ingress-agent-configmaps
rules:
-- apiGroups: ['']
- resources: [configmaps]
- verbs: [get, list, watch]
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
---
-# Source: emissary-ingress/templates/ambassador-agent.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
- name: emissary-ingress-agent-config
- namespace: default
labels:
- app.kubernetes.io/name: emissary-ingress-agent
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress-agent
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
+ name: emissary-ingress-agent-config
+ namespace: default
rules:
-- apiGroups: ['']
- resources: [configmaps]
- verbs: [get, list, watch]
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
---
-# Source: emissary-ingress/templates/ambassador-agent.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
- name: emissary-ingress-agent-config
- namespace: default
labels:
- app.kubernetes.io/name: emissary-ingress-agent
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress-agent
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
+ name: emissary-ingress-agent-config
+ namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@@ -526,41 +545,36 @@ subjects:
name: emissary-ingress-agent
namespace: default
---
-# Source: emissary-ingress/templates/ambassador-agent.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
- name: emissary-ingress-agent
- namespace: default
labels:
- app.kubernetes.io/name: emissary-ingress-agent
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress-agent
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
+ name: emissary-ingress-agent
+ namespace: default
spec:
+ progressDeadlineSeconds: 600
replicas: 1
selector:
matchLabels:
- app.kubernetes.io/name: emissary-ingress-agent
app.kubernetes.io/instance: emissary-ingress
+ app.kubernetes.io/name: emissary-ingress-agent
template:
metadata:
labels:
- app.kubernetes.io/name: emissary-ingress-agent
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress-agent
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
spec:
- serviceAccountName: emissary-ingress-agent
containers:
- - name: agent
- image: $imageRepo$:$version$
- imagePullPolicy: IfNotPresent
- command: [agent]
+ - command:
+ - agent
env:
- name: AGENT_NAMESPACE
valueFrom:
@@ -572,4 +586,7 @@ spec:
value: https://app.getambassador.io/
- name: AES_SNAPSHOT_URL
value: http://emissary-ingress-admin.default:8005/snapshot-external
- progressDeadlineSeconds: 600
+ image: $imageRepo$:$version$
+ imagePullPolicy: IfNotPresent
+ name: agent
+ serviceAccountName: emissary-ingress-agent
diff --git a/manifests/emissary/emissary-emissaryns.yaml.in b/manifests/emissary/emissary-emissaryns.yaml.in
index 21b92d17d4..8225f64702 100644
--- a/manifests/emissary/emissary-emissaryns.yaml.in
+++ b/manifests/emissary/emissary-emissaryns.yaml.in
@@ -1,72 +1,63 @@
# GENERATED FILE: edits made by hand will not be preserved.
---
-# Source: emissary-ingress/templates/admin-service.yaml
apiVersion: v1
kind: Service
metadata:
- name: emissary-ingress-admin
- namespace: emissary
- labels:
- app.kubernetes.io/name: emissary-ingress
-
- app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
- app.kubernetes.io/managed-by: getambassador.io
- # Hard-coded label for Prometheus Operator ServiceMonitor
- service: ambassador-admin
- product: aes
annotations:
+ a8r.io/bugs: https://github.com/datawire/ambassador/issues
+ a8r.io/chat: http://a8r.io/Slack
+ a8r.io/dependencies: None
+ a8r.io/description: The Ambassador Edge Stack admin service for internal use and health checks.
+ a8r.io/documentation: https://www.getambassador.io/docs/edge-stack/latest/
a8r.io/owner: Ambassador Labs
a8r.io/repository: github.com/datawire/ambassador
- a8r.io/description: The Ambassador Edge Stack admin service for internal use and
- health checks.
- a8r.io/documentation: https://www.getambassador.io/docs/edge-stack/latest/
- a8r.io/chat: http://a8r.io/Slack
- a8r.io/bugs: https://github.com/datawire/ambassador/issues
a8r.io/support: https://www.getambassador.io/about-us/support/
- a8r.io/dependencies: None
+ labels:
+ app.kubernetes.io/instance: emissary-ingress
+ app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress
+ app.kubernetes.io/part-of: emissary-ingress
+ product: aes
+ service: ambassador-admin
+ name: emissary-ingress-admin
+ namespace: emissary
spec:
- type: NodePort
ports:
- - port: 8877
+ - name: ambassador-admin
+ port: 8877
+ protocol: TCP
targetPort: admin
+ - name: ambassador-snapshot
+ port: 8005
protocol: TCP
- name: ambassador-admin
- - port: 8005
targetPort: 8005
- protocol: TCP
- name: ambassador-snapshot
selector:
- app.kubernetes.io/name: emissary-ingress
app.kubernetes.io/instance: emissary-ingress
+ app.kubernetes.io/name: emissary-ingress
+ type: NodePort
---
-# Source: emissary-ingress/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
- name: emissary-ingress
- namespace: emissary
- labels:
- app.kubernetes.io/name: emissary-ingress
-
- app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
- app.kubernetes.io/managed-by: getambassador.io
- app.kubernetes.io/component: ambassador-service
- product: aes
annotations:
+ a8r.io/bugs: https://github.com/datawire/ambassador/issues
+ a8r.io/chat: http://a8r.io/Slack
+ a8r.io/dependencies: emissary-ingress-redis.emissary
+ a8r.io/description: The Ambassador Edge Stack goes beyond traditional API Gateways and Ingress Controllers with the advanced edge features needed to support developer self-service and full-cycle development.
+ a8r.io/documentation: https://www.getambassador.io/docs/edge-stack/latest/
a8r.io/owner: Ambassador Labs
a8r.io/repository: github.com/datawire/ambassador
- a8r.io/description: The Ambassador Edge Stack goes beyond traditional API Gateways
- and Ingress Controllers with the advanced edge features needed to support developer
- self-service and full-cycle development.
- a8r.io/documentation: https://www.getambassador.io/docs/edge-stack/latest/
- a8r.io/chat: http://a8r.io/Slack
- a8r.io/bugs: https://github.com/datawire/ambassador/issues
a8r.io/support: https://www.getambassador.io/about-us/support/
- a8r.io/dependencies: emissary-ingress-redis.emissary
+ labels:
+ app.kubernetes.io/component: ambassador-service
+ app.kubernetes.io/instance: emissary-ingress
+ app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress
+ app.kubernetes.io/part-of: emissary-ingress
+ product: aes
+ name: emissary-ingress
+ namespace: emissary
spec:
- type: LoadBalancer
ports:
- name: http
port: 80
@@ -75,208 +66,208 @@ spec:
port: 443
targetPort: 8443
selector:
- app.kubernetes.io/name: emissary-ingress
app.kubernetes.io/instance: emissary-ingress
+ app.kubernetes.io/name: emissary-ingress
profile: main
+ type: LoadBalancer
---
-# Source: emissary-ingress/templates/rbac.yaml
-######################################################################
-# Aggregate #
-######################################################################
-# This ClusterRole has an empty `rules` and instead sets
-# `aggregationRule` in order to aggregate several other ClusterRoles
-# together, to avoid the need for multiple ClusterRoleBindings.
+aggregationRule:
+ clusterRoleSelectors:
+ - matchLabels:
+ rbac.getambassador.io/role-group: emissary-ingress
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: emissary-ingress
labels:
- app.kubernetes.io/name: emissary-ingress
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
-aggregationRule:
- clusterRoleSelectors:
- - matchLabels:
- rbac.getambassador.io/role-group: emissary-ingress
+ name: emissary-ingress
rules: []
---
-# Source: emissary-ingress/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
- name: emissary-ingress
- namespace: emissary
labels:
- app.kubernetes.io/name: emissary-ingress
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
+ name: emissary-ingress
+ namespace: emissary
---
-# Source: emissary-ingress/templates/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
- name: emissary-ingress
labels:
- app.kubernetes.io/name: emissary-ingress
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
+ name: emissary-ingress
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: emissary-ingress
subjects:
-- name: emissary-ingress
+- kind: ServiceAccount
+ name: emissary-ingress
namespace: emissary
- kind: ServiceAccount
---
-# Source: emissary-ingress/templates/rbac.yaml
-######################################################################
-# No namespace #
-######################################################################
-# These ClusterRoles should be limited to resource types that are
-# non-namespaced, and therefore cannot be put in a Role, even if
-# Emissary is in single-namespace mode.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: emissary-ingress-crd
labels:
- app.kubernetes.io/name: emissary-ingress
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
rbac.getambassador.io/role-group: emissary-ingress
+ name: emissary-ingress-crd
rules:
-- apiGroups: [apiextensions.k8s.io]
- resources: [customresourcedefinitions]
- verbs: [get, list, watch, delete]
+- apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - get
+ - list
+ - watch
+ - delete
---
-# Source: emissary-ingress/templates/rbac.yaml
-######################################################################
-# All namespaces #
-######################################################################
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: emissary-ingress-watch
labels:
- app.kubernetes.io/name: emissary-ingress
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
rbac.getambassador.io/role-group: emissary-ingress
+ name: emissary-ingress-watch
rules:
-- apiGroups: ['']
+- apiGroups:
+ - ""
resources:
- namespaces
- services
- secrets
+ - configmaps
- endpoints
- verbs: [get, list, watch]
-
-- apiGroups: [getambassador.io]
- resources: ['*']
- verbs: [get, list, watch, update, patch, create, delete]
-
-- apiGroups: [getambassador.io]
- resources: [mappings/status]
- verbs: [update]
-
-- apiGroups: [networking.internal.knative.dev]
- resources: [clusteringresses, ingresses]
- verbs: [get, list, watch]
-
-- apiGroups: [networking.x-k8s.io]
- resources: ['*']
- verbs: [get, list, watch]
-
-- apiGroups: [networking.internal.knative.dev]
- resources: [ingresses/status, clusteringresses/status]
- verbs: [update]
-
-- apiGroups: [extensions, networking.k8s.io]
- resources: [ingresses, ingressclasses]
- verbs: [get, list, watch]
-
-- apiGroups: [extensions, networking.k8s.io]
- resources: [ingresses/status]
- verbs: [update]
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - getambassador.io
+ resources:
+ - '*'
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - create
+ - delete
+- apiGroups:
+ - getambassador.io
+ resources:
+ - mappings/status
+ verbs:
+ - update
+- apiGroups:
+ - networking.internal.knative.dev
+ resources:
+ - clusteringresses
+ - ingresses
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - networking.x-k8s.io
+ resources:
+ - '*'
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - networking.internal.knative.dev
+ resources:
+ - ingresses/status
+ - clusteringresses/status
+ verbs:
+ - update
+- apiGroups:
+ - extensions
+ - networking.k8s.io
+ resources:
+ - ingresses
+ - ingressclasses
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - extensions
+ - networking.k8s.io
+ resources:
+ - ingresses/status
+ verbs:
+ - update
---
-# Source: emissary-ingress/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
- name: emissary-ingress
- namespace: emissary
labels:
- app.kubernetes.io/name: emissary-ingress
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
+ name: emissary-ingress
+ namespace: emissary
spec:
+ progressDeadlineSeconds: 600
replicas: 3
selector:
matchLabels:
- app.kubernetes.io/name: emissary-ingress
app.kubernetes.io/instance: emissary-ingress
+ app.kubernetes.io/name: emissary-ingress
strategy:
type: RollingUpdate
-
-
- progressDeadlineSeconds: 600
template:
metadata:
+ annotations:
+ consul.hashicorp.com/connect-inject: "false"
+ sidecar.istio.io/inject: "false"
labels:
- app.kubernetes.io/name: emissary-ingress
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
profile: main
- annotations:
- consul.hashicorp.com/connect-inject: 'false'
- sidecar.istio.io/inject: 'false'
spec:
- terminationGracePeriodSeconds: 0
- securityContext:
- runAsUser: 8888
- restartPolicy: Always
- serviceAccountName: emissary-ingress
- volumes:
- - name: ambassador-pod-info
- downwardAPI:
- items:
- - fieldRef:
- fieldPath: metadata.labels
- path: labels
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - podAffinityTerm:
+ labelSelector:
+ matchLabels:
+ service: ambassador
+ topologyKey: kubernetes.io/hostname
+ weight: 100
containers:
- - name: ambassador
- image: $imageRepo$:$version$
- imagePullPolicy: IfNotPresent
- ports:
- - name: http
- containerPort: 8080
- - name: https
- containerPort: 8443
- - name: admin
- containerPort: 8877
- env:
+ - env:
- name: HOST_IP
valueFrom:
fieldRef:
@@ -285,26 +276,30 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- securityContext:
- allowPrivilegeEscalation: false
+ image: $imageRepo$:$version$
+ imagePullPolicy: IfNotPresent
livenessProbe:
+ failureThreshold: 3
httpGet:
path: /ambassador/v0/check_alive
port: admin
- failureThreshold: 3
initialDelaySeconds: 30
periodSeconds: 3
+ name: ambassador
+ ports:
+ - containerPort: 8080
+ name: http
+ - containerPort: 8443
+ name: https
+ - containerPort: 8877
+ name: admin
readinessProbe:
+ failureThreshold: 3
httpGet:
path: /ambassador/v0/check_ready
port: admin
- failureThreshold: 3
initialDelaySeconds: 30
periodSeconds: 3
- volumeMounts:
- - name: ambassador-pod-info
- mountPath: /tmp/ambassador-pod-info
- readOnly: true
resources:
limits:
cpu: 1
@@ -312,45 +307,50 @@ spec:
requests:
cpu: 200m
memory: 100Mi
- affinity:
- podAntiAffinity:
- preferredDuringSchedulingIgnoredDuringExecution:
- - podAffinityTerm:
- labelSelector:
- matchLabels:
- service: ambassador
- topologyKey: kubernetes.io/hostname
- weight: 100
- imagePullSecrets: []
+ securityContext:
+ allowPrivilegeEscalation: false
+ volumeMounts:
+ - mountPath: /tmp/ambassador-pod-info
+ name: ambassador-pod-info
+ readOnly: true
dnsPolicy: ClusterFirst
hostNetwork: false
+ imagePullSecrets: []
+ restartPolicy: Always
+ securityContext:
+ runAsUser: 8888
+ serviceAccountName: emissary-ingress
+ terminationGracePeriodSeconds: 0
+ volumes:
+ - downwardAPI:
+ items:
+ - fieldRef:
+ fieldPath: metadata.labels
+ path: labels
+ name: ambassador-pod-info
---
-# Source: emissary-ingress/templates/ambassador-agent.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
- name: emissary-ingress-agent
- namespace: emissary
labels:
- app.kubernetes.io/name: emissary-ingress-agent
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress-agent
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
+ name: emissary-ingress-agent
+ namespace: emissary
---
-# Source: emissary-ingress/templates/ambassador-agent.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
- name: emissary-ingress-agent
labels:
- app.kubernetes.io/name: emissary-ingress-agent
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress-agent
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
+ name: emissary-ingress-agent
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -360,163 +360,182 @@ subjects:
name: emissary-ingress-agent
namespace: emissary
---
-# Source: emissary-ingress/templates/ambassador-agent.yaml
+aggregationRule:
+ clusterRoleSelectors:
+ - matchLabels:
+ rbac.getambassador.io/role-group: emissary-ingress-agent
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: emissary-ingress-agent
labels:
- app.kubernetes.io/name: emissary-ingress-agent
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress-agent
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
-aggregationRule:
- clusterRoleSelectors:
- - matchLabels:
- rbac.getambassador.io/role-group: emissary-ingress-agent
+ name: emissary-ingress-agent
rules: []
---
-# Source: emissary-ingress/templates/ambassador-agent.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: emissary-ingress-agent-pods
labels:
- rbac.getambassador.io/role-group: emissary-ingress-agent
- app.kubernetes.io/name: emissary-ingress-agent
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress-agent
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
+ rbac.getambassador.io/role-group: emissary-ingress-agent
+ name: emissary-ingress-agent-pods
rules:
-- apiGroups: ['']
- resources: [pods]
- verbs: [get, list, watch]
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
---
-# Source: emissary-ingress/templates/ambassador-agent.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: emissary-ingress-agent-rollouts
labels:
- rbac.getambassador.io/role-group: emissary-ingress-agent
- app.kubernetes.io/name: emissary-ingress-agent
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress-agent
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
+ rbac.getambassador.io/role-group: emissary-ingress-agent
+ name: emissary-ingress-agent-rollouts
rules:
-- apiGroups: [argoproj.io]
- resources: [rollouts]
- verbs: [get, list, watch, patch]
+- apiGroups:
+ - argoproj.io
+ resources:
+ - rollouts
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
---
-# Source: emissary-ingress/templates/ambassador-agent.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: emissary-ingress-agent-applications
labels:
- rbac.getambassador.io/role-group: emissary-ingress-agent
- app.kubernetes.io/name: emissary-ingress-agent
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress-agent
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
+ rbac.getambassador.io/role-group: emissary-ingress-agent
+ name: emissary-ingress-agent-applications
rules:
-- apiGroups: [argoproj.io]
- resources: [applications]
- verbs: [get, list, watch]
+- apiGroups:
+ - argoproj.io
+ resources:
+ - applications
+ verbs:
+ - get
+ - list
+ - watch
---
-# Source: emissary-ingress/templates/ambassador-agent.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: emissary-ingress-agent-deployments
labels:
- rbac.getambassador.io/role-group: emissary-ingress-agent
- app.kubernetes.io/name: emissary-ingress-agent
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress-agent
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
+ rbac.getambassador.io/role-group: emissary-ingress-agent
+ name: emissary-ingress-agent-deployments
rules:
-- apiGroups: [apps, extensions]
- resources: [deployments]
- verbs: [get, list, watch]
+- apiGroups:
+ - apps
+ - extensions
+ resources:
+ - deployments
+ verbs:
+ - get
+ - list
+ - watch
---
-# Source: emissary-ingress/templates/ambassador-agent.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: emissary-ingress-agent-endpoints
labels:
- rbac.getambassador.io/role-group: emissary-ingress-agent
- app.kubernetes.io/name: emissary-ingress-agent
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress-agent
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
+ rbac.getambassador.io/role-group: emissary-ingress-agent
+ name: emissary-ingress-agent-endpoints
rules:
-- apiGroups: ['']
- resources: [endpoints]
- verbs: [get, list, watch]
+- apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - get
+ - list
+ - watch
---
-# Source: emissary-ingress/templates/ambassador-agent.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: emissary-ingress-agent-configmaps
labels:
- rbac.getambassador.io/role-group: emissary-ingress-agent
- app.kubernetes.io/name: emissary-ingress-agent
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress-agent
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
+ rbac.getambassador.io/role-group: emissary-ingress-agent
+ name: emissary-ingress-agent-configmaps
rules:
-- apiGroups: ['']
- resources: [configmaps]
- verbs: [get, list, watch]
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
---
-# Source: emissary-ingress/templates/ambassador-agent.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
- name: emissary-ingress-agent-config
- namespace: emissary
labels:
- app.kubernetes.io/name: emissary-ingress-agent
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress-agent
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
+ name: emissary-ingress-agent-config
+ namespace: emissary
rules:
-- apiGroups: ['']
- resources: [configmaps]
- verbs: [get, list, watch]
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - get
+ - list
+ - watch
---
-# Source: emissary-ingress/templates/ambassador-agent.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
- name: emissary-ingress-agent-config
- namespace: emissary
labels:
- app.kubernetes.io/name: emissary-ingress-agent
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress-agent
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
+ name: emissary-ingress-agent-config
+ namespace: emissary
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@@ -526,41 +545,36 @@ subjects:
name: emissary-ingress-agent
namespace: emissary
---
-# Source: emissary-ingress/templates/ambassador-agent.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
- name: emissary-ingress-agent
- namespace: emissary
labels:
- app.kubernetes.io/name: emissary-ingress-agent
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress-agent
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
+ name: emissary-ingress-agent
+ namespace: emissary
spec:
+ progressDeadlineSeconds: 600
replicas: 1
selector:
matchLabels:
- app.kubernetes.io/name: emissary-ingress-agent
app.kubernetes.io/instance: emissary-ingress
+ app.kubernetes.io/name: emissary-ingress-agent
template:
metadata:
labels:
- app.kubernetes.io/name: emissary-ingress-agent
-
app.kubernetes.io/instance: emissary-ingress
- app.kubernetes.io/part-of: emissary-ingress
app.kubernetes.io/managed-by: getambassador.io
+ app.kubernetes.io/name: emissary-ingress-agent
+ app.kubernetes.io/part-of: emissary-ingress
product: aes
spec:
- serviceAccountName: emissary-ingress-agent
containers:
- - name: agent
- image: $imageRepo$:$version$
- imagePullPolicy: IfNotPresent
- command: [agent]
+ - command:
+ - agent
env:
- name: AGENT_NAMESPACE
valueFrom:
@@ -572,4 +586,7 @@ spec:
value: https://app.getambassador.io/
- name: AES_SNAPSHOT_URL
value: http://emissary-ingress-admin.emissary:8005/snapshot-external
- progressDeadlineSeconds: 600
+ image: $imageRepo$:$version$
+ imagePullPolicy: IfNotPresent
+ name: agent
+ serviceAccountName: emissary-ingress-agent
diff --git a/pkg/snapshot/v1/types.go b/pkg/snapshot/v1/types.go
index d8f1748e03..fa43cf8306 100644
--- a/pkg/snapshot/v1/types.go
+++ b/pkg/snapshot/v1/types.go
@@ -96,14 +96,15 @@ type KubernetesSnapshot struct {
FSSecrets map[SecretRef]*kates.Secret `json:"-"` // Secrets from the filesystem
Secrets []*kates.Secret `json:"secret"` // Secrets we'll feed to Ambassador
+ ConfigMaps []*kates.ConfigMap `json:"ConfigMaps,omitempty"`
+
// [kind/name.namespace][]kates.Object
Annotations map[string]AnnotationList `json:"annotations"`
- // Pods, Deployments and ConfigMaps were added to be used by Ambassador Agent so it can
+ // Pods and Deployments were added to be used by Ambassador Agent so it can
// report to AgentCom in Ambassador Cloud.
Pods []*kates.Pod `json:"Pods,omitempty"`
Deployments []*kates.Deployment `json:"Deployments,omitempty"`
- ConfigMaps []*kates.ConfigMap `json:"ConfigMaps,omitempty"`
// ArgoRollouts represents the argo-rollout CRD state of the world that may or may not be present
// in the client's cluster. For this reason, Rollouts resources are fetched making use of the
diff --git a/python/ambassador/config/config.py b/python/ambassador/config/config.py
index 74b3720ef4..de02189249 100644
--- a/python/ambassador/config/config.py
+++ b/python/ambassador/config/config.py
@@ -451,21 +451,20 @@ def validate_object(self, resource: ACResource) -> RichStatus:
ns = resource.get('namespace') or self.ambassador_namespace
name = f"{resource.name} ns {ns}"
- # Did entrypoint.go flag errors here? (in a later version we'll short-circuit earlier, but
- # for now we're going to re-validate as a sanity check.)
+ # Did entrypoint.go flag errors here that we should show to the user?
#
# (It's still called watt_errors because our other docs talk about "watt
# snapshots", and I'm OK with retaining that name for the format.)
if 'errors' in resource:
- # Pop the errors out of this resource, since we can't validate in Python
- # while it's present!
+ # Pop the errors out of this resource...
errors = resource.pop('errors').split('\n')
- # This weird list comprehension around 'errors' is just filtering out any
- # empty lines.
+ # ...strip any empty lines in the error list with this one weird list
+ # comprehension...
watt_errors = '; '.join([error for error in errors if error])
- if watt_errors: # check that it's not an empty string
+ # ...and, assuming that we're left with any error message, post it.
+ if watt_errors:
return RichStatus.fromError(watt_errors)
return RichStatus.OK(msg=f"good {resource.kind}")
diff --git a/python/ambassador/fetch/fetcher.py b/python/ambassador/fetch/fetcher.py
index 7ec050f0f8..7054756d10 100644
--- a/python/ambassador/fetch/fetcher.py
+++ b/python/ambassador/fetch/fetcher.py
@@ -350,7 +350,7 @@ def sorted(self, key=lambda x: x.rkey): # returns an iterator, probably
return sorted(self.elements, key=key)
def handle_k8s(self, raw_obj: dict) -> None:
- # self.logger.debug("handle_k8s obj %s" % dump_json(obj, pretty=True))
+ # self.logger.debug("handle_k8s obj %s" % dump_json(raw_obj, pretty=True))
try:
obj = KubernetesObject(raw_obj)
diff --git a/python/ambassador/fetch/secret.py b/python/ambassador/fetch/secret.py
index a02a922fdf..1a8196d823 100644
--- a/python/ambassador/fetch/secret.py
+++ b/python/ambassador/fetch/secret.py
@@ -7,6 +7,7 @@
from .k8sprocessor import ManagedKubernetesProcessor
from .resource import NormalizedResource, ResourceManager
+from ..utils import dump_json
class SecretProcessor (ManagedKubernetesProcessor):
"""
@@ -43,6 +44,8 @@ def _admit(self, obj: KubernetesObject) -> bool:
return super()._admit(obj)
def _process(self, obj: KubernetesObject) -> None:
+ # self.logger.debug("processing K8s Secret %s", dump_json(dict(obj), pretty=True))
+
secret_type = obj.get('type')
if secret_type not in self.KNOWN_TYPES:
self.logger.debug("ignoring K8s Secret with unknown type %s" % secret_type)
@@ -72,4 +75,5 @@ def _process(self, obj: KubernetesObject) -> None:
namespace=obj.namespace,
labels=obj.labels,
spec=spec,
+ errors=obj.get('errors'), # Make sure we preserve errors here!
))
diff --git a/python/kat/harness.py b/python/kat/harness.py
index 26f201fbba..d059438251 100755
--- a/python/kat/harness.py
+++ b/python/kat/harness.py
@@ -23,6 +23,7 @@
from yaml.scanner import ScannerError as YAMLScanError
+import tests.integration.manifests as integration_manifests
from multi import multi
from .parser import dump, load, Tag
from tests.manifests import httpbin_manifests, websocket_echo_server_manifests, cleartext_host_manifest, default_listener_manifest
@@ -65,7 +66,6 @@
if not SOURCE_ROOT:
SOURCE_ROOT = "/buildroot/apro"
GOLD_ROOT = os.path.join(SOURCE_ROOT, "tests/pytest/gold")
- MANIFEST_ROOT = os.path.join(SOURCE_ROOT, "tests/pytest/manifests")
else:
# We're either not running in Edge Stack or we're not sure, so just assume OSS.
print("RUNNING IN OSS")
@@ -74,41 +74,7 @@
if not SOURCE_ROOT:
SOURCE_ROOT = "/buildroot/ambassador"
GOLD_ROOT = os.path.join(SOURCE_ROOT, "python/tests/gold")
- MANIFEST_ROOT = os.path.join(SOURCE_ROOT, "python/tests/integration/manifests")
-def load_manifest(manifest_name: str) -> str:
- return open(os.path.join(MANIFEST_ROOT, f"{manifest_name.lower()}.yaml"), "r").read()
-
-
-class TestImage:
- def __init__(self, *args, **kwargs) -> None:
- self.images: Dict[str, str] = {}
-
- svc_names = ['auth', 'ratelimit', 'shadow', 'stats']
-
- try:
- subprocess.run(['make']+[f'docker/test-{svc}.docker.push.remote' for svc in svc_names],
- check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
- except subprocess.CalledProcessError as err:
- raise Exception(f"{err.stdout}{err}") from err
-
- for svc in svc_names:
- with open(f'docker/test-{svc}.docker.push.remote', 'r') as fh:
- # file contents:
- # line 1: image ID
- # line 2: tag 1
- # line 3: tag 2
- # ...
- #
- # Set 'image' to one of the tags.
- image = fh.readlines()[1].strip()
- self.images[svc] = image
-
- def __getitem__(self, key: str) -> str:
- return self.images[key]
-
-
-GLOBAL_TEST_IMAGE = TestImage()
def run(cmd):
status = os.system(cmd)
@@ -372,8 +338,6 @@ def __init__(self, *args, **kwargs) -> None:
self.skip_node = False
self.xfail = None
- self.test_image = GLOBAL_TEST_IMAGE
-
name = kwargs.pop("name", None)
if 'namespace' in kwargs:
@@ -609,13 +573,7 @@ def depth(self):
return self.parent.depth + 1
def format(self, st, **kwargs):
- serviceAccountExtra = ''
- if os.environ.get("DEV_USE_IMAGEPULLSECRET", False):
- serviceAccountExtra = """
-imagePullSecrets:
-- name: dev-image-pull-secret
-"""
- return st.format(self=self, environ=os.environ, serviceAccountExtra=serviceAccountExtra, **kwargs)
+ return integration_manifests.format(st, self=self, **kwargs)
def get_fqdn(self, name: str) -> str:
if self.namespace and (self.namespace != 'default'):
@@ -1113,8 +1071,7 @@ def allocate(self, service_name) -> List[int]:
return ports
def get_manifest_list(self) -> List[Dict[str, Any]]:
- SUPERPOD_POD = load_manifest("superpod_pod")
- manifest = load('superpod', SUPERPOD_POD.format(environ=os.environ), Tag.MAPPING)
+ manifest = load('superpod', integration_manifests.format(integration_manifests.load("superpod_pod")), Tag.MAPPING)
assert len(manifest) == 1, "SUPERPOD manifest must have exactly one object"
@@ -1276,12 +1233,11 @@ def get_manifests_and_namespaces(self, selected) -> Tuple[Any, List[str]]:
# print(f'superpodifying {n.name}')
- # Next up: use the BACKEND_SERVICE manifest as a template...
- BACKEND_SERVICE = load_manifest("backend_service")
- yaml = n.format(BACKEND_SERVICE)
+ # Next up: use the backend_service.yaml manifest as a template...
+ yaml = n.format(integration_manifests.load("backend_service"))
manifest = load(n.path, yaml, Tag.MAPPING)
- assert len(manifest) == 1, "BACKEND_SERVICE manifest must have exactly one object"
+ assert len(manifest) == 1, "backend_service.yaml manifest must have exactly one object"
m = manifest[0]
@@ -1532,24 +1488,9 @@ def _setup_k8s(self, selected):
manifest_changed, manifest_reason = has_changed(yaml, fname)
# First up: CRDs.
- serviceAccountExtra = ''
- if os.environ.get("DEV_USE_IMAGEPULLSECRET", False):
- serviceAccountExtra = """
-imagePullSecrets:
-- name: dev-image-pull-secret
-"""
-
- # Use .replace instead of .format because there are other '{word}' things in 'description'
- # fields that would cause KeyErrors when .format erroneously tries to evaluate them.
- input_crds = (
- load_manifest("crds")
- .replace('{image}', os.environ["AMBASSADOR_DOCKER_IMAGE"])
- .replace('{serviceAccountExtra}', serviceAccountExtra)
- )
-
+ input_crds = integration_manifests.CRDmanifests
if is_knative_compatible():
- KNATIVE_SERVING_CRDS = load_manifest("knative_serving_crds")
- input_crds += KNATIVE_SERVING_CRDS
+ input_crds += integration_manifests.load("knative_serving_crds")
# Strip out all of the schema validation, so that we can test with broken CRDs.
# (KAT isn't really in the business of testing to be sure that Kubernetes can
@@ -1622,10 +1563,10 @@ def _setup_k8s(self, selected):
print(f'CRDS unchanged {reason}, skipping apply.')
# Next up: the KAT pod.
- KAT_CLIENT_POD = load_manifest("kat_client_pod")
+ kat_client_manifests = integration_manifests.load("kat_client_pod")
if os.environ.get("DEV_USE_IMAGEPULLSECRET", False):
- KAT_CLIENT_POD = namespace_manifest("default") + KAT_CLIENT_POD
- changed, reason = has_changed(KAT_CLIENT_POD.format(environ=os.environ), "/tmp/k8s-kat-pod.yaml")
+ kat_client_manifests = namespace_manifest("default") + kat_client_manifests
+ changed, reason = has_changed(integration_manifests.format(kat_client_manifests), "/tmp/k8s-kat-pod.yaml")
if changed:
print(f'KAT pod definition changed ({reason}), applying')
@@ -1655,10 +1596,10 @@ def _setup_k8s(self, selected):
# Use a dummy pod to get around the !*@$!*@ DockerHub rate limit.
# XXX Better: switch to GCR.
- dummy_pod = load_manifest("dummy_pod")
+ dummy_pod = integration_manifests.load("dummy_pod")
if os.environ.get("DEV_USE_IMAGEPULLSECRET", False):
dummy_pod = namespace_manifest("default") + dummy_pod
- changed, reason = has_changed(dummy_pod.format(environ=os.environ), "/tmp/k8s-dummy-pod.yaml")
+ changed, reason = has_changed(integration_manifests.format(dummy_pod), "/tmp/k8s-dummy-pod.yaml")
if changed:
print(f'Dummy pod definition changed ({reason}), applying')
diff --git a/python/tests/__init__.py b/python/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/python/tests/integration/__init__.py b/python/tests/integration/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/python/tests/integration/manifests.py b/python/tests/integration/manifests.py
new file mode 100644
index 0000000000..30e1adf918
--- /dev/null
+++ b/python/tests/integration/manifests.py
@@ -0,0 +1,70 @@
+import os
+import subprocess
+from typing import Dict, Final
+
+def _get_images() -> Dict[str, str]:
+ ret: Dict[str, str] = {}
+
+ # Keep this list in-sync with the 'push-pytest-images' Makefile target.
+ image_names = [
+ 'test-auth',
+ 'test-shadow',
+ 'test-stats',
+ 'kat-client',
+ 'kat-server',
+ ]
+
+ if image := os.environ.get('AMBASSADOR_DOCKER_IMAGE'):
+ ret['emissary'] = image
+ else:
+ image_names.append('emissary')
+
+ try:
+ subprocess.run(['make']+[f'docker/{name}.docker.push.remote' for name in image_names],
+ check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
+ except subprocess.CalledProcessError as err:
+ raise Exception(f"{err.stdout}{err}") from err
+
+ for name in image_names:
+ with open(f'docker/{name}.docker.push.remote', 'r') as fh:
+ # file contents:
+ # line 1: image ID
+ # line 2: tag 1
+ # line 3: tag 2
+ # ...
+ tag = fh.readlines()[1].strip()
+ ret[name] = tag
+
+ return ret
+
+images: Final = _get_images()
+
+_file_cache: Dict[str, str] = {}
+
+def load(manifest_name: str) -> str:
+ if manifest_name in _file_cache:
+ return _file_cache[manifest_name]
+ manifest_dir = __file__[:-len('.py')]
+ manifest_file = os.path.join(manifest_dir, manifest_name+'.yaml')
+ manifest_content = open(manifest_file, 'r').read()
+ _file_cache[manifest_name] = manifest_content
+ return manifest_content
+
+def format(st: str, /, **kwargs):
+ serviceAccountExtra = ''
+ if os.environ.get("DEV_USE_IMAGEPULLSECRET", False):
+ serviceAccountExtra = """
+imagePullSecrets:
+- name: dev-image-pull-secret
+"""
+ return st.format(serviceAccountExtra=serviceAccountExtra,
+ images=images,
+ **kwargs)
+
+# Use .replace instead of .format because there are other '{word}' things in 'description' fields
+# that would cause KeyErrors when .format erroneously tries to evaluate them.
+CRDmanifests: Final[str] = (
+ load('crds')
+ .replace('{images[emissary]}', images['emissary'])
+ .replace('{serviceAccountExtra}', format('{serviceAccountExtra}'))
+)
diff --git a/python/tests/integration/manifests/ambassador.yaml b/python/tests/integration/manifests/ambassador.yaml
index a942226c6a..fecd310cfa 100644
--- a/python/tests/integration/manifests/ambassador.yaml
+++ b/python/tests/integration/manifests/ambassador.yaml
@@ -1,32 +1,27 @@
# GENERATED FILE: edits made by hand will not be preserved.
---
-# Source: emissary-ingress/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
- name: {self.path.k8s}
- namespace: {self.namespace}
- labels:
- app.kubernetes.io/name: kat-ambassador
-
- app.kubernetes.io/instance: kat-ambassador
- app.kubernetes.io/part-of: kat-ambassador
- app.kubernetes.io/managed-by: kat
- app.kubernetes.io/component: ambassador-service
- product: aes
annotations:
+ a8r.io/bugs: https://github.com/datawire/ambassador/issues
+ a8r.io/chat: http://a8r.io/Slack
+ a8r.io/dependencies: {self.path.k8s}-redis.{self.namespace}
+ a8r.io/description: The Ambassador Edge Stack goes beyond traditional API Gateways and Ingress Controllers with the advanced edge features needed to support developer self-service and full-cycle development.
+ a8r.io/documentation: https://www.getambassador.io/docs/edge-stack/latest/
a8r.io/owner: Ambassador Labs
a8r.io/repository: github.com/datawire/ambassador
- a8r.io/description: The Ambassador Edge Stack goes beyond traditional API Gateways
- and Ingress Controllers with the advanced edge features needed to support developer
- self-service and full-cycle development.
- a8r.io/documentation: https://www.getambassador.io/docs/edge-stack/latest/
- a8r.io/chat: http://a8r.io/Slack
- a8r.io/bugs: https://github.com/datawire/ambassador/issues
a8r.io/support: https://www.getambassador.io/about-us/support/
- a8r.io/dependencies: {self.path.k8s}-redis.{self.namespace}
+ labels:
+ app.kubernetes.io/component: ambassador-service
+ app.kubernetes.io/instance: kat-ambassador
+ app.kubernetes.io/managed-by: kat
+ app.kubernetes.io/name: kat-ambassador
+ app.kubernetes.io/part-of: kat-ambassador
+ product: aes
+ name: {self.path.k8s}
+ namespace: {self.namespace}
spec:
- type: ClusterIP
ports:
- name: http
port: 80
@@ -36,93 +31,63 @@ spec:
targetPort: 8443
{extra_ports}
selector:
- service: {self.path.k8s}
profile: main
+ service: {self.path.k8s}
+ type: ClusterIP
---
-# Source: emissary-ingress/templates/admin-service.yaml
apiVersion: v1
kind: Service
metadata:
- name: {self.path.k8s}-admin
- namespace: {self.namespace}
- labels:
- app.kubernetes.io/name: kat-ambassador
-
- app.kubernetes.io/instance: kat-ambassador
- app.kubernetes.io/part-of: kat-ambassador
- app.kubernetes.io/managed-by: kat
- # Hard-coded label for Prometheus Operator ServiceMonitor
- service: ambassador-admin
- product: aes
annotations:
+ a8r.io/bugs: https://github.com/datawire/ambassador/issues
+ a8r.io/chat: http://a8r.io/Slack
+ a8r.io/dependencies: None
+ a8r.io/description: The Ambassador Edge Stack admin service for internal use and health checks.
+ a8r.io/documentation: https://www.getambassador.io/docs/edge-stack/latest/
a8r.io/owner: Ambassador Labs
a8r.io/repository: github.com/datawire/ambassador
- a8r.io/description: The Ambassador Edge Stack admin service for internal use and
- health checks.
- a8r.io/documentation: https://www.getambassador.io/docs/edge-stack/latest/
- a8r.io/chat: http://a8r.io/Slack
- a8r.io/bugs: https://github.com/datawire/ambassador/issues
a8r.io/support: https://www.getambassador.io/about-us/support/
- a8r.io/dependencies: None
+ labels:
+ app.kubernetes.io/instance: kat-ambassador
+ app.kubernetes.io/managed-by: kat
+ app.kubernetes.io/name: kat-ambassador
+ app.kubernetes.io/part-of: kat-ambassador
+ product: aes
+ service: ambassador-admin
+ name: {self.path.k8s}-admin
+ namespace: {self.namespace}
spec:
- type: ClusterIP
ports:
- - port: 8877
+ - name: ambassador-admin
+ port: 8877
+ protocol: TCP
targetPort: admin
+ - name: ambassador-snapshot
+ port: 8005
protocol: TCP
- name: ambassador-admin
- - port: 8005
targetPort: 8005
- protocol: TCP
- name: ambassador-snapshot
selector:
service: {self.path.k8s}
+ type: ClusterIP
---
-# Source: emissary-ingress/templates/deployment.yaml
apiVersion: v1
kind: Pod
metadata:
- name: {self.path.k8s}
- namespace: {self.namespace}
+ annotations:
+ checksum/config: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b
labels:
- service: {self.path.k8s}
- app.kubernetes.io/name: kat-ambassador
-
app.kubernetes.io/instance: kat-ambassador
- app.kubernetes.io/part-of: kat-ambassador
app.kubernetes.io/managed-by: kat
+ app.kubernetes.io/name: kat-ambassador
+ app.kubernetes.io/part-of: kat-ambassador
product: aes
profile: main
- annotations:
- checksum/config: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b
+ service: {self.path.k8s}
+ name: {self.path.k8s}
+ namespace: {self.namespace}
spec:
- securityContext:
- runAsUser: 8888
- restartPolicy: Always
- serviceAccountName: {self.path.k8s}
- volumes:
- - name: ambassador-pod-info
- downwardAPI:
- items:
- - fieldRef:
- fieldPath: metadata.labels
- path: labels
- - emptyDir:
- medium: Memory
- sizeLimit: 45Mi
- name: scratchpad
containers:
- - name: ambassador
- image: {image}
- imagePullPolicy: IfNotPresent
- ports:
- - name: http
- containerPort: 8080
- - name: https
- containerPort: 8443
- - name: admin
- containerPort: 8877
- env:
+ - env:
- name: HOST_IP
valueFrom:
fieldRef:
@@ -131,38 +96,65 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
+ - name: AGENT_CONFIG_RESOURCE_NAME
+ value: {self.path.k8s}-agent-cloud-token
- name: AMBASSADOR_CONFIG_BASE_DIR
value: /tmp/ambassador
- name: AMBASSADOR_ID
value: {self.path.k8s}
- name: AMBASSADOR_SNAPSHOT_COUNT
- value: '0'
+ value: "0"
{envs}
- securityContext:
- {capabilities_block}
- allowPrivilegeEscalation: false
- readOnlyRootFilesystem: true
+ image: {images[emissary]}
+ imagePullPolicy: IfNotPresent
livenessProbe:
+ failureThreshold: 3
httpGet:
path: /ambassador/v0/check_alive
port: admin
- failureThreshold: 3
initialDelaySeconds: 30
periodSeconds: 3
+ name: ambassador
+ ports:
+ - containerPort: 8080
+ name: http
+ - containerPort: 8443
+ name: https
+ - containerPort: 8877
+ name: admin
readinessProbe:
+ failureThreshold: 3
httpGet:
path: /ambassador/v0/check_ready
port: admin
- failureThreshold: 3
initialDelaySeconds: 30
periodSeconds: 3
+ resources: null
+ securityContext:
+ {capabilities_block}
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: true
volumeMounts:
- - name: ambassador-pod-info
- mountPath: /tmp/ambassador-pod-info
+ - mountPath: /tmp/ambassador-pod-info
+ name: ambassador-pod-info
readOnly: true
- mountPath: /tmp/
name: scratchpad
- resources:
- imagePullSecrets: []
dnsPolicy: ClusterFirst
hostNetwork: false
+ imagePullSecrets: []
+ restartPolicy: Always
+ securityContext:
+ runAsUser: 8888
+ serviceAccountName: {self.path.k8s}
+ volumes:
+ - downwardAPI:
+ items:
+ - fieldRef:
+ fieldPath: metadata.labels
+ path: labels
+ name: ambassador-pod-info
+ - emptyDir:
+ medium: Memory
+ sizeLimit: 45Mi
+ name: scratchpad
diff --git a/python/tests/integration/manifests/auth_backend.yaml b/python/tests/integration/manifests/auth_backend.yaml
index 015c59f1ad..2c0e305ace 100644
--- a/python/tests/integration/manifests/auth_backend.yaml
+++ b/python/tests/integration/manifests/auth_backend.yaml
@@ -26,7 +26,7 @@ metadata:
spec:
containers:
- name: backend
- image: {environ[KAT_SERVER_DOCKER_IMAGE]}
+ image: {images[kat-server]}
ports:
- containerPort: 8080
env:
diff --git a/python/tests/integration/manifests/backend.yaml b/python/tests/integration/manifests/backend.yaml
index d4f15729cf..20d66c3635 100644
--- a/python/tests/integration/manifests/backend.yaml
+++ b/python/tests/integration/manifests/backend.yaml
@@ -27,7 +27,7 @@ metadata:
spec:
containers:
- name: backend
- image: {environ[KAT_SERVER_DOCKER_IMAGE]}
+ image: {images[kat-server]}
ports:
- containerPort: 8080
env:
diff --git a/python/tests/integration/manifests/crds.yaml b/python/tests/integration/manifests/crds.yaml
index 7ebaf58ff8..3d85a590c8 100644
--- a/python/tests/integration/manifests/crds.yaml
+++ b/python/tests/integration/manifests/crds.yaml
@@ -3894,7 +3894,7 @@ spec:
serviceAccountName: emissary-apiext
containers:
- name: emissary-apiext
- image: {image}
+ image: {images[emissary]}
imagePullPolicy: IfNotPresent
command: [ "apiext", "emissary-apiext" ]
ports:
diff --git a/python/tests/integration/manifests/dummy_pod.yaml b/python/tests/integration/manifests/dummy_pod.yaml
index bbcf176fd0..0a128d4478 100644
--- a/python/tests/integration/manifests/dummy_pod.yaml
+++ b/python/tests/integration/manifests/dummy_pod.yaml
@@ -22,7 +22,7 @@ spec:
fieldPath: metadata.labels
containers:
- name: ambassador
- image: {environ[AMBASSADOR_DOCKER_IMAGE]}
+ image: {images[emissary]}
imagePullPolicy: Always
command: [ "sh" ]
args: [ "-c", "while true; do sleep 3600; done" ]
diff --git a/python/tests/integration/manifests/grpc_auth_backend.yaml b/python/tests/integration/manifests/grpc_auth_backend.yaml
index 8c67f3d393..c2817c2623 100644
--- a/python/tests/integration/manifests/grpc_auth_backend.yaml
+++ b/python/tests/integration/manifests/grpc_auth_backend.yaml
@@ -26,7 +26,7 @@ metadata:
spec:
containers:
- name: backend
- image: {environ[KAT_SERVER_DOCKER_IMAGE]}
+ image: {images[kat-server]}
ports:
- containerPort: 8080
env:
diff --git a/python/tests/integration/manifests/grpc_echo_backend.yaml b/python/tests/integration/manifests/grpc_echo_backend.yaml
index 0f8b5452af..44e4c1be9f 100644
--- a/python/tests/integration/manifests/grpc_echo_backend.yaml
+++ b/python/tests/integration/manifests/grpc_echo_backend.yaml
@@ -26,7 +26,7 @@ metadata:
spec:
containers:
- name: backend
- image: {environ[KAT_SERVER_DOCKER_IMAGE]}
+ image: {images[kat-server]}
ports:
- containerPort: 8080
env:
diff --git a/python/tests/integration/manifests/grpc_rls_backend.yaml b/python/tests/integration/manifests/grpc_rls_backend.yaml
index 4df08e995e..f062927605 100644
--- a/python/tests/integration/manifests/grpc_rls_backend.yaml
+++ b/python/tests/integration/manifests/grpc_rls_backend.yaml
@@ -26,7 +26,7 @@ metadata:
spec:
containers:
- name: backend
- image: {environ[KAT_SERVER_DOCKER_IMAGE]}
+ image: {images[kat-server]}
ports:
- containerPort: 8080
env:
diff --git a/python/tests/integration/manifests/kat_client_pod.yaml b/python/tests/integration/manifests/kat_client_pod.yaml
index 687eb539bd..1f2e361e39 100644
--- a/python/tests/integration/manifests/kat_client_pod.yaml
+++ b/python/tests/integration/manifests/kat_client_pod.yaml
@@ -9,4 +9,4 @@ metadata:
spec:
containers:
- name: backend
- image: {environ[KAT_CLIENT_DOCKER_IMAGE]}
+ image: {images[kat-client]}
diff --git a/python/tests/integration/manifests/rbac_cluster_scope.yaml b/python/tests/integration/manifests/rbac_cluster_scope.yaml
index d0aedd1452..f443da2518 100644
--- a/python/tests/integration/manifests/rbac_cluster_scope.yaml
+++ b/python/tests/integration/manifests/rbac_cluster_scope.yaml
@@ -1,138 +1,155 @@
# GENERATED FILE: edits made by hand will not be preserved.
---
-# Source: emissary-ingress/templates/rbac.yaml
-######################################################################
-# Aggregate #
-######################################################################
-# This ClusterRole has an empty `rules` and instead sets
-# `aggregationRule` in order to aggregate several other ClusterRoles
-# together, to avoid the need for multiple ClusterRoleBindings.
+aggregationRule:
+ clusterRoleSelectors:
+ - matchLabels:
+ rbac.getambassador.io/role-group: {self.path.k8s}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: {self.path.k8s}
labels:
- app.kubernetes.io/name: emissary-ingress
-
app.kubernetes.io/instance: kat-rbac-multinamespace
- app.kubernetes.io/part-of: kat-rbac-multinamespace
app.kubernetes.io/managed-by: kat
+ app.kubernetes.io/name: emissary-ingress
+ app.kubernetes.io/part-of: kat-rbac-multinamespace
product: aes
-aggregationRule:
- clusterRoleSelectors:
- - matchLabels:
- rbac.getambassador.io/role-group: {self.path.k8s}
+ name: {self.path.k8s}
rules: []
---
-# Source: emissary-ingress/templates/serviceaccount.yaml
+{serviceAccountExtra}
apiVersion: v1
kind: ServiceAccount
metadata:
- name: {self.path.k8s}
- namespace: {self.namespace}
labels:
- app.kubernetes.io/name: emissary-ingress
-
app.kubernetes.io/instance: kat-rbac-multinamespace
- app.kubernetes.io/part-of: kat-rbac-multinamespace
app.kubernetes.io/managed-by: kat
+ app.kubernetes.io/name: emissary-ingress
+ app.kubernetes.io/part-of: kat-rbac-multinamespace
product: aes
-{serviceAccountExtra}
+ name: {self.path.k8s}
+ namespace: {self.namespace}
---
-# Source: emissary-ingress/templates/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
- name: {self.path.k8s}
labels:
- app.kubernetes.io/name: emissary-ingress
-
app.kubernetes.io/instance: kat-rbac-multinamespace
- app.kubernetes.io/part-of: kat-rbac-multinamespace
app.kubernetes.io/managed-by: kat
+ app.kubernetes.io/name: emissary-ingress
+ app.kubernetes.io/part-of: kat-rbac-multinamespace
product: aes
+ name: {self.path.k8s}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {self.path.k8s}
subjects:
-- name: {self.path.k8s}
+- kind: ServiceAccount
+ name: {self.path.k8s}
namespace: {self.namespace}
- kind: ServiceAccount
---
-# Source: emissary-ingress/templates/rbac.yaml
-######################################################################
-# No namespace #
-######################################################################
-# These ClusterRoles should be limited to resource types that are
-# non-namespaced, and therefore cannot be put in a Role, even if
-# Emissary is in single-namespace mode.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: {self.path.k8s}-crd
labels:
- app.kubernetes.io/name: emissary-ingress
-
app.kubernetes.io/instance: kat-rbac-multinamespace
- app.kubernetes.io/part-of: kat-rbac-multinamespace
app.kubernetes.io/managed-by: kat
+ app.kubernetes.io/name: emissary-ingress
+ app.kubernetes.io/part-of: kat-rbac-multinamespace
product: aes
rbac.getambassador.io/role-group: {self.path.k8s}
+ name: {self.path.k8s}-crd
rules:
-- apiGroups: [apiextensions.k8s.io]
- resources: [customresourcedefinitions]
- verbs: [get, list, watch, delete]
+- apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - get
+ - list
+ - watch
+ - delete
---
-# Source: emissary-ingress/templates/rbac.yaml
-######################################################################
-# All namespaces #
-######################################################################
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: {self.path.k8s}-watch
labels:
- app.kubernetes.io/name: emissary-ingress
-
app.kubernetes.io/instance: kat-rbac-multinamespace
- app.kubernetes.io/part-of: kat-rbac-multinamespace
app.kubernetes.io/managed-by: kat
+ app.kubernetes.io/name: emissary-ingress
+ app.kubernetes.io/part-of: kat-rbac-multinamespace
product: aes
rbac.getambassador.io/role-group: {self.path.k8s}
+ name: {self.path.k8s}-watch
rules:
-- apiGroups: ['']
+- apiGroups:
+ - ""
resources:
- namespaces
- services
- secrets
+ - configmaps
- endpoints
- verbs: [get, list, watch]
-
-- apiGroups: [getambassador.io]
- resources: ['*']
- verbs: [get, list, watch, update, patch, create, delete]
-
-- apiGroups: [getambassador.io]
- resources: [mappings/status]
- verbs: [update]
-
-- apiGroups: [networking.internal.knative.dev]
- resources: [clusteringresses, ingresses]
- verbs: [get, list, watch]
-
-- apiGroups: [networking.x-k8s.io]
- resources: ['*']
- verbs: [get, list, watch]
-
-- apiGroups: [networking.internal.knative.dev]
- resources: [ingresses/status, clusteringresses/status]
- verbs: [update]
-
-- apiGroups: [extensions, networking.k8s.io]
- resources: [ingresses, ingressclasses]
- verbs: [get, list, watch]
-
-- apiGroups: [extensions, networking.k8s.io]
- resources: [ingresses/status]
- verbs: [update]
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - getambassador.io
+ resources:
+ - '*'
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - create
+ - delete
+- apiGroups:
+ - getambassador.io
+ resources:
+ - mappings/status
+ verbs:
+ - update
+- apiGroups:
+ - networking.internal.knative.dev
+ resources:
+ - clusteringresses
+ - ingresses
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - networking.x-k8s.io
+ resources:
+ - '*'
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - networking.internal.knative.dev
+ resources:
+ - ingresses/status
+ - clusteringresses/status
+ verbs:
+ - update
+- apiGroups:
+ - extensions
+ - networking.k8s.io
+ resources:
+ - ingresses
+ - ingressclasses
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - extensions
+ - networking.k8s.io
+ resources:
+ - ingresses/status
+ verbs:
+ - update
diff --git a/python/tests/integration/manifests/rbac_namespace_scope.yaml b/python/tests/integration/manifests/rbac_namespace_scope.yaml
index b5e9c20f29..1d4159a87e 100644
--- a/python/tests/integration/manifests/rbac_namespace_scope.yaml
+++ b/python/tests/integration/manifests/rbac_namespace_scope.yaml
@@ -1,164 +1,176 @@
# GENERATED FILE: edits made by hand will not be preserved.
---
-# Source: emissary-ingress/templates/rbac.yaml
-######################################################################
-# Aggregate #
-######################################################################
-# This ClusterRole has an empty `rules` and instead sets
-# `aggregationRule` in order to aggregate several other ClusterRoles
-# together, to avoid the need for multiple ClusterRoleBindings.
+aggregationRule:
+ clusterRoleSelectors:
+ - matchLabels:
+ rbac.getambassador.io/role-group: {self.path.k8s}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: {self.path.k8s}
labels:
- app.kubernetes.io/name: emissary-ingress
-
app.kubernetes.io/instance: kat-rbac-singlenamespace
- app.kubernetes.io/part-of: kat-rbac-singlenamespace
app.kubernetes.io/managed-by: kat
+ app.kubernetes.io/name: emissary-ingress
+ app.kubernetes.io/part-of: kat-rbac-singlenamespace
product: aes
-aggregationRule:
- clusterRoleSelectors:
- - matchLabels:
- rbac.getambassador.io/role-group: {self.path.k8s}
+ name: {self.path.k8s}
rules: []
---
-# Source: emissary-ingress/templates/serviceaccount.yaml
+{serviceAccountExtra}
apiVersion: v1
kind: ServiceAccount
metadata:
- name: {self.path.k8s}
- namespace: {self.namespace}
labels:
- app.kubernetes.io/name: emissary-ingress
-
app.kubernetes.io/instance: kat-rbac-singlenamespace
- app.kubernetes.io/part-of: kat-rbac-singlenamespace
app.kubernetes.io/managed-by: kat
+ app.kubernetes.io/name: emissary-ingress
+ app.kubernetes.io/part-of: kat-rbac-singlenamespace
product: aes
-{serviceAccountExtra}
+ name: {self.path.k8s}
+ namespace: {self.namespace}
---
-# Source: emissary-ingress/templates/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
- name: {self.path.k8s}
labels:
- app.kubernetes.io/name: emissary-ingress
-
app.kubernetes.io/instance: kat-rbac-singlenamespace
- app.kubernetes.io/part-of: kat-rbac-singlenamespace
app.kubernetes.io/managed-by: kat
+ app.kubernetes.io/name: emissary-ingress
+ app.kubernetes.io/part-of: kat-rbac-singlenamespace
product: aes
+ name: {self.path.k8s}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {self.path.k8s}
subjects:
-- name: {self.path.k8s}
+- kind: ServiceAccount
+ name: {self.path.k8s}
namespace: {self.namespace}
- kind: ServiceAccount
---
-# Source: emissary-ingress/templates/rbac.yaml
-######################################################################
-# No namespace #
-######################################################################
-# These ClusterRoles should be limited to resource types that are
-# non-namespaced, and therefore cannot be put in a Role, even if
-# Emissary is in single-namespace mode.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- name: {self.path.k8s}-crd
labels:
- app.kubernetes.io/name: emissary-ingress
-
app.kubernetes.io/instance: kat-rbac-singlenamespace
- app.kubernetes.io/part-of: kat-rbac-singlenamespace
app.kubernetes.io/managed-by: kat
+ app.kubernetes.io/name: emissary-ingress
+ app.kubernetes.io/part-of: kat-rbac-singlenamespace
product: aes
rbac.getambassador.io/role-group: {self.path.k8s}
+ name: {self.path.k8s}-crd
rules:
-- apiGroups: [apiextensions.k8s.io]
- resources: [customresourcedefinitions]
- verbs: [get, list, watch, delete]
+- apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - get
+ - list
+ - watch
+ - delete
---
-# Source: emissary-ingress/templates/rbac.yaml
-######################################################################
-# All namespaces #
-######################################################################
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
- name: {self.path.k8s}
- namespace: {self.namespace}
labels:
- app.kubernetes.io/name: emissary-ingress
-
app.kubernetes.io/instance: kat-rbac-singlenamespace
- app.kubernetes.io/part-of: kat-rbac-singlenamespace
app.kubernetes.io/managed-by: kat
+ app.kubernetes.io/name: emissary-ingress
+ app.kubernetes.io/part-of: kat-rbac-singlenamespace
product: aes
rbac.getambassador.io/role-group: {self.path.k8s}
+ name: {self.path.k8s}
+ namespace: {self.namespace}
rules:
-- apiGroups: ['']
+- apiGroups:
+ - ""
resources:
- namespaces
- services
- secrets
+ - configmaps
- endpoints
- verbs: [get, list, watch]
-
-- apiGroups: [getambassador.io]
- resources: ['*']
- verbs: [get, list, watch, update, patch, create, delete]
-
-- apiGroups: [getambassador.io]
- resources: [mappings/status]
- verbs: [update]
-
-- apiGroups: [networking.internal.knative.dev]
- resources: [clusteringresses, ingresses]
- verbs: [get, list, watch]
-
-- apiGroups: [networking.x-k8s.io]
- resources: ['*']
- verbs: [get, list, watch]
-
-- apiGroups: [networking.internal.knative.dev]
- resources: [ingresses/status, clusteringresses/status]
- verbs: [update]
-
-- apiGroups: [extensions, networking.k8s.io]
- resources: [ingresses, ingressclasses]
- verbs: [get, list, watch]
-
-- apiGroups: [extensions, networking.k8s.io]
- resources: [ingresses/status]
- verbs: [update]
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - getambassador.io
+ resources:
+ - '*'
+ verbs:
+ - get
+ - list
+ - watch
+ - update
+ - patch
+ - create
+ - delete
+- apiGroups:
+ - getambassador.io
+ resources:
+ - mappings/status
+ verbs:
+ - update
+- apiGroups:
+ - networking.internal.knative.dev
+ resources:
+ - clusteringresses
+ - ingresses
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - networking.x-k8s.io
+ resources:
+ - '*'
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - networking.internal.knative.dev
+ resources:
+ - ingresses/status
+ - clusteringresses/status
+ verbs:
+ - update
+- apiGroups:
+ - extensions
+ - networking.k8s.io
+ resources:
+ - ingresses
+ - ingressclasses
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - extensions
+ - networking.k8s.io
+ resources:
+ - ingresses/status
+ verbs:
+ - update
---
-# Source: emissary-ingress/templates/rbac.yaml
-# This RoleBinding is only needed if single
-# .Values.scope.singleNamespace because otherwise the ClusterRole will
-# be aggregated in to the master ClusterRole via aggregationRule.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
- name: {self.path.k8s}
- namespace: {self.namespace}
labels:
- app.kubernetes.io/name: emissary-ingress
-
app.kubernetes.io/instance: kat-rbac-singlenamespace
- app.kubernetes.io/part-of: kat-rbac-singlenamespace
app.kubernetes.io/managed-by: kat
+ app.kubernetes.io/name: emissary-ingress
+ app.kubernetes.io/part-of: kat-rbac-singlenamespace
product: aes
+ name: {self.path.k8s}
+ namespace: {self.namespace}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {self.path.k8s}
subjects:
-- name: {self.path.k8s}
+- kind: ServiceAccount
+ name: {self.path.k8s}
namespace: {self.namespace}
- kind: ServiceAccount
diff --git a/python/tests/integration/manifests/superpod_pod.yaml b/python/tests/integration/manifests/superpod_pod.yaml
index bb4032febf..0b96eb38ab 100644
--- a/python/tests/integration/manifests/superpod_pod.yaml
+++ b/python/tests/integration/manifests/superpod_pod.yaml
@@ -16,7 +16,7 @@ spec:
spec:
containers:
- name: backend
- image: {environ[KAT_SERVER_DOCKER_IMAGE]}
+ image: {images[kat-server]}
# ports:
# (ports)
env:
diff --git a/python/tests/integration/test_header_case_overrides.py b/python/tests/integration/test_header_case_overrides.py
index 1c889c9b7a..0e634f429f 100644
--- a/python/tests/integration/test_header_case_overrides.py
+++ b/python/tests/integration/test_header_case_overrides.py
@@ -6,7 +6,7 @@
import pytest
import requests
-from tests.utils import install_ambassador, create_httpbin_mapping
+from tests.integration.utils import install_ambassador, create_httpbin_mapping
from tests.kubeutils import apply_kube_artifacts
from tests.runutils import run_and_assert
from tests.manifests import httpbin_manifests
diff --git a/python/tests/integration/test_knative.py b/python/tests/integration/test_knative.py
index 669f4935e8..cd7e953bb1 100644
--- a/python/tests/integration/test_knative.py
+++ b/python/tests/integration/test_knative.py
@@ -6,13 +6,13 @@
import pytest
-from kat.harness import is_knative_compatible
-from kat.harness import load_manifest
from ambassador import Config, IR
from ambassador.fetch import ResourceFetcher
from ambassador.utils import NullSecretHandler, parse_bool
-from tests.utils import install_ambassador, get_code_with_retry, create_qotm_mapping
+import tests.integration.manifests as integration_manifests
+from kat.harness import is_knative_compatible
+from tests.integration.utils import install_ambassador, get_code_with_retry, create_qotm_mapping
from tests.kubeutils import apply_kube_artifacts, delete_kube_artifacts
from tests.runutils import run_with_retry, run_and_assert
from tests.manifests import qotm_manifests
@@ -71,8 +71,8 @@ def test_knative(self):
namespace = 'knative-testing'
# Install Knative
- apply_kube_artifacts(namespace=None, artifacts=load_manifest("knative_serving_crds"))
- apply_kube_artifacts(namespace='knative-serving', artifacts=load_manifest("knative_serving_0.18.0"))
+ apply_kube_artifacts(namespace=None, artifacts=integration_manifests.load("knative_serving_crds"))
+ apply_kube_artifacts(namespace='knative-serving', artifacts=integration_manifests.load("knative_serving_0.18.0"))
run_and_assert(['tools/bin/kubectl', 'patch', 'configmap/config-network', '--type', 'merge', '--patch', r'{"data": {"ingress.class": "ambassador.ingress.networking.knative.dev"}}', '-n', 'knative-serving'])
# Wait for Knative to become ready
diff --git a/python/tests/integration/test_watt_scaling.py b/python/tests/integration/test_watt_scaling.py
index 61ac02bb7c..33a2a10ac4 100644
--- a/python/tests/integration/test_watt_scaling.py
+++ b/python/tests/integration/test_watt_scaling.py
@@ -3,7 +3,7 @@
import pytest
-from tests.utils import install_ambassador, get_code_with_retry
+from tests.integration.utils import install_ambassador, get_code_with_retry
from tests.kubeutils import apply_kube_artifacts, delete_kube_artifacts
from tests.runutils import run_with_retry, run_and_assert
from tests.manifests import qotm_manifests
diff --git a/python/tests/integration/utils.py b/python/tests/integration/utils.py
new file mode 100644
index 0000000000..d228174d79
--- /dev/null
+++ b/python/tests/integration/utils.py
@@ -0,0 +1,181 @@
+import logging
+import json
+import os
+import subprocess
+import requests
+import socket
+import tempfile
+import time
+from collections import namedtuple
+from retry import retry
+
+import json
+import yaml
+
+from ambassador import Cache, IR
+from ambassador.compile import Compile
+from ambassador.utils import NullSecretHandler
+
+import tests.integration.manifests as integration_manifests
+from kat.utils import namespace_manifest
+from tests.manifests import cleartext_host_manifest
+from tests.kubeutils import apply_kube_artifacts
+from tests.runutils import run_and_assert
+
+# Assume that both of these are on the PATH if not explicitly set
+KUBESTATUS_PATH = os.environ.get('KUBESTATUS_PATH', 'kubestatus')
+
+def install_ambassador(namespace, single_namespace=True, envs=None, debug=None):
+ """
+ Install Ambassador into a given namespace. NOTE WELL that although there
+ is a 'single_namespace' parameter, this function probably needs work to do
+ the fully-correct thing with single_namespace False.
+
+ :param namespace: namespace to install Ambassador in
+ :param single_namespace: should we set AMBASSADOR_SINGLE_NAMESPACE? SEE NOTE ABOVE!
+ :param envs: [
+ {
+ 'name': 'ENV_NAME',
+ 'value': 'ENV_VALUE'
+ },
+ ...
+ ...
+ ]
+ """
+
+ if envs is None:
+ envs = []
+
+ if single_namespace:
+ update_envs(envs, "AMBASSADOR_SINGLE_NAMESPACE", "true")
+
+ if debug:
+ update_envs(envs, "AMBASSADOR_DEBUG", debug)
+
+ # Create namespace to install Ambassador
+ create_namespace(namespace)
+
+ # Create Ambassador CRDs
+ apply_kube_artifacts(namespace='emissary-system', artifacts=integration_manifests.CRDmanifests)
+
+ print("Wait for apiext to be running...")
+ run_and_assert(['tools/bin/kubectl', 'wait', '--timeout=90s', '--for=condition=available', 'deploy', 'emissary-apiext', '-n', 'emissary-system'])
+
+ # Proceed to install Ambassador now
+ final_yaml = []
+
+ rbac_manifest_name = 'rbac_namespace_scope' if single_namespace else 'rbac_cluster_scope'
+
+ # Hackish fakes of actual KAT structures -- it's _far_ too much work to synthesize
+ # actual KAT Nodes and Paths.
+ fakeNode = namedtuple('fakeNode', [ 'namespace', 'path', 'ambassador_id' ])
+ fakePath = namedtuple('fakePath', [ 'k8s' ])
+
+ ambassador_yaml = list(yaml.safe_load_all(
+ integration_manifests.format(
+ "\n".join([
+ integration_manifests.load(rbac_manifest_name),
+ integration_manifests.load('ambassador'),
+ (cleartext_host_manifest % namespace),
+ ]),
+ capabilities_block="",
+ envs="",
+ extra_ports="",
+ self=fakeNode(
+ namespace=namespace,
+ ambassador_id='default',
+ path=fakePath(k8s='ambassador')
+ ),
+ )))
+
+ for manifest in ambassador_yaml:
+ kind = manifest.get('kind', None)
+ metadata = manifest.get('metadata', {})
+ name = metadata.get('name', None)
+
+ if (kind == "Pod") and (name == "ambassador"):
+ # Force AMBASSADOR_ID to match ours.
+ #
+ # XXX This is not likely to work without single_namespace=True.
+ for envvar in manifest['spec']['containers'][0]['env']:
+ if envvar.get('name', '') == 'AMBASSADOR_ID':
+ envvar['value'] = 'default'
+
+ # add new envs, if any
+ manifest['spec']['containers'][0]['env'].extend(envs)
+
+ # print("INSTALLING AMBASSADOR: manifests:")
+ # print(yaml.safe_dump_all(ambassador_yaml))
+
+ apply_kube_artifacts(namespace=namespace, artifacts=yaml.safe_dump_all(ambassador_yaml))
+
+
+def update_envs(envs, name, value):
+ found = False
+
+ for e in envs:
+ if e['name'] == name:
+ e['value'] = value
+ found = True
+ break
+
+ if not found:
+ envs.append({
+ 'name': name,
+ 'value': value
+ })
+
+
+def create_namespace(namespace):
+ apply_kube_artifacts(namespace=namespace, artifacts=namespace_manifest(namespace))
+
+
+def create_qotm_mapping(namespace):
+ qotm_mapping = f"""
+---
+apiVersion: getambassador.io/v3alpha1
+kind: Mapping
+metadata:
+ name: qotm-mapping
+ namespace: {namespace}
+spec:
+ hostname: "*"
+ prefix: /qotm/
+ service: qotm
+"""
+
+ apply_kube_artifacts(namespace=namespace, artifacts=qotm_mapping)
+
+def create_httpbin_mapping(namespace):
+ httpbin_mapping = f"""
+---
+apiVersion: getambassador.io/v3alpha1
+kind: Mapping
+metadata:
+ name: httpbin-mapping
+ namespace: {namespace}
+spec:
+ hostname: "*"
+ prefix: /httpbin/
+ rewrite: /
+ service: httpbin
+"""
+
+ apply_kube_artifacts(namespace=namespace, artifacts=httpbin_mapping)
+
+
+def get_code_with_retry(req, headers={}):
+ for attempts in range(10):
+ try:
+ resp = requests.get(req, headers=headers, timeout=10)
+ if resp.status_code < 500:
+ return resp.status_code
+ print(f"get_code_with_retry: 5xx code {resp.status_code}, retrying...")
+ except requests.exceptions.ConnectionError as e:
+ print(f"get_code_with_retry: ConnectionError {e}, attempt {attempts+1}")
+ except socket.timeout as e:
+ print(f"get_code_with_retry: socket.timeout {e}, attempt {attempts+1}")
+ except Exception as e:
+ print(f"get_code_with_retry: generic exception {e}, attempt {attempts+1}")
+ time.sleep(5)
+ return 503
diff --git a/python/tests/kat/abstract_tests.py b/python/tests/kat/abstract_tests.py
index 04ad985588..126f21236b 100644
--- a/python/tests/kat/abstract_tests.py
+++ b/python/tests/kat/abstract_tests.py
@@ -21,18 +21,10 @@
except AttributeError:
pass
-from kat.harness import abstract_test, sanitize, Name, Node, Test, Query, load_manifest
+import tests.integration.manifests as integration_manifests
+from kat.harness import abstract_test, sanitize, Name, Node, Test, Query
from kat.utils import ShellCommand
-RBAC_CLUSTER_SCOPE = load_manifest("rbac_cluster_scope")
-RBAC_NAMESPACE_SCOPE = load_manifest("rbac_namespace_scope")
-AMBASSADOR = load_manifest("ambassador")
-BACKEND = load_manifest("backend")
-GRPC_ECHO_BACKEND = load_manifest("grpc_echo_backend")
-AUTH_BACKEND = load_manifest("auth_backend")
-GRPC_AUTH_BACKEND = load_manifest("grpc_auth_backend")
-GRPC_RLS_BACKEND = load_manifest("grpc_rls_backend")
-
AMBASSADOR_LOCAL = """
---
apiVersion: v1
@@ -104,7 +96,7 @@ class AmbassadorTest(Test):
env: List[str] = []
def manifests(self) -> str:
- rbac = RBAC_CLUSTER_SCOPE
+ rbac = integration_manifests.load("rbac_cluster_scope")
self.manifest_envs += """
- name: POLL_EVERY_SECS
@@ -143,7 +135,7 @@ def manifests(self) -> str:
- name: AMBASSADOR_SINGLE_NAMESPACE
value: "yes"
"""
- rbac = RBAC_NAMESPACE_SCOPE
+ rbac = integration_manifests.load("rbac_namespace_scope")
if self.disable_endpoints:
self.manifest_envs += """
@@ -179,10 +171,13 @@ def manifests(self) -> str:
"""
if DEV:
- return self.format(rbac + AMBASSADOR_LOCAL, extra_ports=eports)
+ return self.format(rbac + AMBASSADOR_LOCAL,
+ extra_ports=eports)
else:
- return self.format(rbac + AMBASSADOR,
- image=os.environ["AMBASSADOR_DOCKER_IMAGE"], envs=self.manifest_envs, extra_ports=eports, capabilities_block = "")
+ return self.format(rbac + integration_manifests.load('ambassador'),
+ envs=self.manifest_envs,
+ extra_ports=eports,
+ capabilities_block="")
# # Will tear this out of the harness shortly
# @property
@@ -413,7 +408,7 @@ class ServiceTypeGrpc(Node):
def __init__(self, service_manifests: str=None, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
- self._manifests = service_manifests or BACKEND
+ self._manifests = service_manifests or integration_manifests.load("backend")
def config(self) -> Generator[Union[str, Tuple[Node, str]], None, None]:
yield from ()
@@ -438,7 +433,7 @@ class EGRPC(ServiceType):
def __init__(self, *args, **kwargs) -> None:
# Do this unconditionally, because that's the point of this class.
- kwargs["service_manifests"] = GRPC_ECHO_BACKEND
+ kwargs["service_manifests"] = integration_manifests.load("grpc_echo_backend")
super().__init__(*args, **kwargs)
def requirements(self):
@@ -453,7 +448,7 @@ class AHTTP(ServiceType):
def __init__(self, *args, **kwargs) -> None:
# Do this unconditionally, because that's the point of this class.
- kwargs["service_manifests"] = AUTH_BACKEND
+ kwargs["service_manifests"] = integration_manifests.load("auth_backend")
super().__init__(*args, **kwargs)
@@ -464,7 +459,7 @@ def __init__(self, protocol_version: str="v2", *args, **kwargs) -> None:
self.protocol_version = protocol_version
# Do this unconditionally, because that's the point of this class.
- kwargs["service_manifests"] = GRPC_AUTH_BACKEND
+ kwargs["service_manifests"] = integration_manifests.load("grpc_auth_backend")
super().__init__(*args, **kwargs)
def requirements(self):
@@ -477,7 +472,7 @@ def __init__(self, protocol_version: str="v2", *args, **kwargs) -> None:
self.protocol_version = protocol_version
# Do this unconditionally, because that's the point of this class.
- kwargs["service_manifests"] = GRPC_RLS_BACKEND
+ kwargs["service_manifests"] = integration_manifests.load("grpc_rls_backend")
super().__init__(*args, **kwargs)
def requirements(self):
diff --git a/python/tests/kat/t_circuitbreaker.py b/python/tests/kat/t_circuitbreaker.py
index a3a57d8811..11b1aff4a9 100644
--- a/python/tests/kat/t_circuitbreaker.py
+++ b/python/tests/kat/t_circuitbreaker.py
@@ -1,14 +1,11 @@
-from typing import Generator, Tuple, Union
-
import os
+from typing import Generator, Tuple, Union
import pytest
+import tests.integration.manifests as integration_manifests
from abstract_tests import AmbassadorTest, HTTP, ServiceType, Node
-from kat.harness import Query, load_manifest
-
-AMBASSADOR = load_manifest("ambassador")
-RBAC_CLUSTER_SCOPE = load_manifest("rbac_cluster_scope")
+from kat.harness import Query
STATSD_MANIFEST = """
---
@@ -98,10 +95,15 @@ def manifests(self) -> str:
requestPolicy:
insecure:
action: Route
-""" + self.format(RBAC_CLUSTER_SCOPE + AMBASSADOR, image=os.environ["AMBASSADOR_DOCKER_IMAGE"],
- envs=envs, extra_ports="", capabilities_block="") + \
- STATSD_MANIFEST.format(name='cbstatsd-sink', image=self.test_image['stats'],
- target=self.__class__.TARGET_CLUSTER)
+""" + \
+ self.format(integration_manifests.load("rbac_cluster_scope") + integration_manifests.load("ambassador"),
+ envs=envs,
+ extra_ports="",
+ capabilities_block="") + \
+ STATSD_MANIFEST.format(
+ name='cbstatsd-sink',
+ image=integration_manifests.images['test-stats'],
+ target=self.__class__.TARGET_CLUSTER)
def config(self) -> Generator[Union[str, Tuple[Node, str]], None, None]:
diff --git a/python/tests/kat/t_headerrouting.py b/python/tests/kat/t_headerrouting.py
index aa907e7717..e589eb7289 100644
--- a/python/tests/kat/t_headerrouting.py
+++ b/python/tests/kat/t_headerrouting.py
@@ -15,7 +15,7 @@ def variants(cls) -> Generator[Node, None, None]:
for v in variants(ServiceType):
yield cls(v, v.clone("target2"), name="{self.target.name}")
- # XXX This type: ignore is here because we're deliberately overriding the
+ # XXX This type: ignore is here because we're deliberately overriding the
# parent's init to have a different signature... but it's also intimately
# (nay, incestuously) related to the variant()'s yield() above, and I really
# don't want to deal with that right now. So. We'll deal with it later.
@@ -86,7 +86,7 @@ def __init__(self, *args, **kwargs) -> None:
spec:
containers:
- name: backend
- image: {self.test_image[auth]}
+ image: {images[test-auth]}
ports:
- containerPort: 80
env:
diff --git a/python/tests/kat/t_ingress.py b/python/tests/kat/t_ingress.py
index bea9863783..728f5033bc 100644
--- a/python/tests/kat/t_ingress.py
+++ b/python/tests/kat/t_ingress.py
@@ -9,7 +9,7 @@
from kat.harness import Query, is_ingress_class_compatible
from abstract_tests import AmbassadorTest, HTTP, ServiceType
from kat.utils import namespace_manifest
-from tests.utils import KUBESTATUS_PATH
+from tests.integration.utils import KUBESTATUS_PATH
from ambassador.utils import parse_bool
class IngressStatusTest1(AmbassadorTest):
diff --git a/python/tests/kat/t_loadbalancer.py b/python/tests/kat/t_loadbalancer.py
index 928fc59bbe..26e706ce2d 100644
--- a/python/tests/kat/t_loadbalancer.py
+++ b/python/tests/kat/t_loadbalancer.py
@@ -2,9 +2,9 @@
import os
-from kat.harness import Query
-
+import tests.integration.manifests as integration_manifests
from abstract_tests import AmbassadorTest, ServiceType, HTTP, Node
+from kat.harness import Query
LOADBALANCER_POD = """
@@ -19,7 +19,7 @@
spec:
containers:
- name: backend
- image: {environ[KAT_SERVER_DOCKER_IMAGE]}
+ image: {images[kat-server]}
ports:
- containerPort: 8080
env:
@@ -170,9 +170,18 @@ def init(self):
def manifests(self) -> str:
backend = self.name.lower() + '-backend'
return \
- LOADBALANCER_POD.format(name='{}-1'.format(self.path.k8s), backend=backend, backend_env='{}-1'.format(self.path.k8s), environ=os.environ) + \
- LOADBALANCER_POD.format(name='{}-2'.format(self.path.k8s), backend=backend, backend_env='{}-2'.format(self.path.k8s), environ=os.environ) + \
- LOADBALANCER_POD.format(name='{}-3'.format(self.path.k8s), backend=backend, backend_env='{}-3'.format(self.path.k8s), environ=os.environ) + """
+ integration_manifests.format(LOADBALANCER_POD,
+ name='{}-1'.format(self.path.k8s),
+ backend=backend,
+ backend_env='{}-1'.format(self.path.k8s)) + \
+ integration_manifests.format(LOADBALANCER_POD,
+ name='{}-2'.format(self.path.k8s),
+ backend=backend,
+ backend_env='{}-2'.format(self.path.k8s)) + \
+ integration_manifests.format(LOADBALANCER_POD,
+ name='{}-3'.format(self.path.k8s),
+ backend=backend,
+ backend_env='{}-3'.format(self.path.k8s)) + """
---
apiVersion: v1
kind: Service
@@ -319,9 +328,18 @@ def init(self):
def manifests(self) -> str:
backend = self.name.lower() + '-backend'
return \
- LOADBALANCER_POD.format(name='{}-1'.format(self.path.k8s), backend=backend, backend_env='{}-1'.format(self.path.k8s), environ=os.environ) + \
- LOADBALANCER_POD.format(name='{}-2'.format(self.path.k8s), backend=backend, backend_env='{}-2'.format(self.path.k8s), environ=os.environ) + \
- LOADBALANCER_POD.format(name='{}-3'.format(self.path.k8s), backend=backend, backend_env='{}-3'.format(self.path.k8s), environ=os.environ) + """
+ integration_manifests.format(LOADBALANCER_POD,
+ name='{}-1'.format(self.path.k8s),
+ backend=backend,
+ backend_env='{}-1'.format(self.path.k8s)) + \
+ integration_manifests.format(LOADBALANCER_POD,
+ name='{}-2'.format(self.path.k8s),
+ backend=backend,
+ backend_env='{}-2'.format(self.path.k8s)) + \
+ integration_manifests.format(LOADBALANCER_POD,
+ name='{}-3'.format(self.path.k8s),
+ backend=backend,
+ backend_env='{}-3'.format(self.path.k8s)) + """
---
apiVersion: v1
kind: Service
diff --git a/python/tests/kat/t_shadow.py b/python/tests/kat/t_shadow.py
index cf06eae2ae..6745f11d00 100644
--- a/python/tests/kat/t_shadow.py
+++ b/python/tests/kat/t_shadow.py
@@ -10,7 +10,7 @@ class ShadowTestCANFLAKE(MappingTest):
target: ServiceType
shadow: ServiceType
- # XXX This type: ignore is here because we're deliberately overriding the
+ # XXX This type: ignore is here because we're deliberately overriding the
# parent's init to have a different signature... but it's also intimately
# (nay, incestuously) related to the variant()'s yield() above, and I really
# don't want to deal with that right now. So. We'll deal with it later.
@@ -52,7 +52,7 @@ def manifests(self) -> str:
spec:
containers:
- name: shadow
- image: {self.test_image[shadow]}
+ image: {images[test-shadow]}
ports:
- name: http
containerPort: 3000
diff --git a/python/tests/kat/t_stats.py b/python/tests/kat/t_stats.py
index 1e31a701d3..360aa759d5 100644
--- a/python/tests/kat/t_stats.py
+++ b/python/tests/kat/t_stats.py
@@ -1,13 +1,9 @@
-from typing import Generator, Tuple, Union
-
import os
+from typing import Generator, Tuple, Union
-from kat.harness import Query, load_manifest
-
+import tests.integration.manifests as integration_manifests
from abstract_tests import DEV, AmbassadorTest, HTTP, Node
-
-AMBASSADOR = load_manifest("ambassador")
-RBAC_CLUSTER_SCOPE = load_manifest("rbac_cluster_scope")
+from kat.harness import Query
STATSD_TEST_CLUSTER = "statsdtest_http"
ALT_STATSD_TEST_CLUSTER = "short-stats-name"
@@ -117,9 +113,14 @@ def manifests(self) -> str:
value: 'true'
"""
- return self.format(RBAC_CLUSTER_SCOPE + AMBASSADOR, image=os.environ["AMBASSADOR_DOCKER_IMAGE"],
- envs=envs, extra_ports="", capabilities_block="") + \
- GRAPHITE_CONFIG.format('statsd-sink', self.test_image['stats'], f"{STATSD_TEST_CLUSTER}:{ALT_STATSD_TEST_CLUSTER}")
+ return self.format(integration_manifests.load("rbac_cluster_scope") + integration_manifests.load("ambassador"),
+ envs=envs,
+ extra_ports="",
+ capabilities_block="") + \
+ GRAPHITE_CONFIG.format(
+ 'statsd-sink',
+ integration_manifests.images['test-stats'],
+ f"{STATSD_TEST_CLUSTER}:{ALT_STATSD_TEST_CLUSTER}")
def config(self) -> Generator[Union[str, Tuple[Node, str]], None, None]:
yield self.target, self.format("""
@@ -229,9 +230,14 @@ def manifests(self) -> str:
value: 'true'
"""
- return self.format(RBAC_CLUSTER_SCOPE + AMBASSADOR, image=os.environ["AMBASSADOR_DOCKER_IMAGE"],
- envs=envs, extra_ports="", capabilities_block="") + \
- DOGSTATSD_CONFIG.format('dogstatsd-sink', self.test_image['stats'], DOGSTATSD_TEST_CLUSTER)
+ return self.format(integration_manifests.load("rbac_cluster_scope") + integration_manifests.load('ambassador'),
+ envs=envs,
+ extra_ports="",
+ capabilities_block="") + \
+ DOGSTATSD_CONFIG.format(
+ 'dogstatsd-sink',
+ integration_manifests.images['test-stats'],
+ DOGSTATSD_TEST_CLUSTER)
def config(self) -> Generator[Union[str, Tuple[Node, str]], None, None]:
yield self.target, self.format("""
diff --git a/python/tests/utils.py b/python/tests/utils.py
index 5ce21b187a..7e9a073db3 100644
--- a/python/tests/utils.py
+++ b/python/tests/utils.py
@@ -15,8 +15,7 @@
from ambassador import Cache, IR
from ambassador.compile import Compile
from ambassador.utils import NullSecretHandler
-from kat.utils import namespace_manifest
-from kat.harness import load_manifest
+
from tests.manifests import cleartext_host_manifest
from tests.kubeutils import apply_kube_artifacts
from tests.runutils import run_and_assert
@@ -24,180 +23,9 @@
logger = logging.getLogger("ambassador")
ENVOY_PATH = os.environ.get('ENVOY_PATH', '/usr/local/bin/envoy')
-# Assume that both of these are on the PATH if not explicitly set
-KUBESTATUS_PATH = os.environ.get('KUBESTATUS_PATH', 'kubestatus')
SUPPORTED_ENVOY_VERSIONS = ["V2", "V3"]
-
-def install_ambassador(namespace, single_namespace=True, envs=None, debug=None):
- """
- Install Ambassador into a given namespace. NOTE WELL that although there
- is a 'single_namespace' parameter, this function probably needs work to do
- the fully-correct thing with single_namespace False.
-
- :param namespace: namespace to install Ambassador in
- :param single_namespace: should we set AMBASSADOR_SINGLE_NAMESPACE? SEE NOTE ABOVE!
- :param envs: [
- {
- 'name': 'ENV_NAME',
- 'value': 'ENV_VALUE'
- },
- ...
- ...
- ]
- """
-
- if envs is None:
- envs = []
-
- if single_namespace:
- update_envs(envs, "AMBASSADOR_SINGLE_NAMESPACE", "true")
-
- if debug:
- update_envs(envs, "AMBASSADOR_DEBUG", debug)
-
- # Create namespace to install Ambassador
- create_namespace(namespace)
-
- serviceAccountExtra = ''
- if os.environ.get("DEV_USE_IMAGEPULLSECRET", False):
- serviceAccountExtra = """
-imagePullSecrets:
-- name: dev-image-pull-secret
-"""
-
- # Create Ambassador CRDs
- apply_kube_artifacts(namespace='emissary-system', artifacts=(
- # Use .replace instead of .format because there are other '{word}' things in 'description'
- # fields that would cause KeyErrors when .format erroneously tries to evaluate them.
- load_manifest("crds")
- .replace('{image}', os.environ["AMBASSADOR_DOCKER_IMAGE"])
- .replace('{serviceAccountExtra}', serviceAccountExtra)
- ))
-
- print("Wait for apiext to be running...")
- run_and_assert(['tools/bin/kubectl', 'wait', '--timeout=90s', '--for=condition=available', 'deploy', 'emissary-apiext', '-n', 'emissary-system'])
-
- # Proceed to install Ambassador now
- final_yaml = []
-
- rbac_manifest_name = 'rbac_namespace_scope' if single_namespace else 'rbac_cluster_scope'
-
- # Hackish fakes of actual KAT structures -- it's _far_ too much work to synthesize
- # actual KAT Nodes and Paths.
- fakeNode = namedtuple('fakeNode', [ 'namespace', 'path', 'ambassador_id' ])
- fakePath = namedtuple('fakePath', [ 'k8s' ])
-
- ambassador_yaml = list(yaml.safe_load_all((
- load_manifest(rbac_manifest_name) +
- load_manifest('ambassador') +
- (cleartext_host_manifest % namespace)
- ).format(
- capabilities_block="",
- envs="",
- extra_ports="",
- serviceAccountExtra=serviceAccountExtra,
- image=os.environ["AMBASSADOR_DOCKER_IMAGE"],
- self=fakeNode(
- namespace=namespace,
- ambassador_id='default',
- path=fakePath(k8s='ambassador')
- )
- )))
-
- for manifest in ambassador_yaml:
- kind = manifest.get('kind', None)
- metadata = manifest.get('metadata', {})
- name = metadata.get('name', None)
-
- if (kind == "Pod") and (name == "ambassador"):
- # Force AMBASSADOR_ID to match ours.
- #
- # XXX This is not likely to work without single_namespace=True.
- for envvar in manifest['spec']['containers'][0]['env']:
- if envvar.get('name', '') == 'AMBASSADOR_ID':
- envvar['value'] = 'default'
-
- # add new envs, if any
- manifest['spec']['containers'][0]['env'].extend(envs)
-
- # print("INSTALLING AMBASSADOR: manifests:")
- # print(yaml.safe_dump_all(ambassador_yaml))
-
- apply_kube_artifacts(namespace=namespace, artifacts=yaml.safe_dump_all(ambassador_yaml))
-
-
-def update_envs(envs, name, value):
- found = False
-
- for e in envs:
- if e['name'] == name:
- e['value'] = value
- found = True
- break
-
- if not found:
- envs.append({
- 'name': name,
- 'value': value
- })
-
-
-def create_namespace(namespace):
- apply_kube_artifacts(namespace=namespace, artifacts=namespace_manifest(namespace))
-
-
-def create_qotm_mapping(namespace):
- qotm_mapping = f"""
----
-apiVersion: getambassador.io/v3alpha1
-kind: Mapping
-metadata:
- name: qotm-mapping
- namespace: {namespace}
-spec:
- hostname: "*"
- prefix: /qotm/
- service: qotm
-"""
-
- apply_kube_artifacts(namespace=namespace, artifacts=qotm_mapping)
-
-def create_httpbin_mapping(namespace):
- httpbin_mapping = f"""
----
-apiVersion: getambassador.io/v3alpha1
-kind: Mapping
-metadata:
- name: httpbin-mapping
- namespace: {namespace}
-spec:
- hostname: "*"
- prefix: /httpbin/
- rewrite: /
- service: httpbin
-"""
-
- apply_kube_artifacts(namespace=namespace, artifacts=httpbin_mapping)
-
-
-def get_code_with_retry(req, headers={}):
- for attempts in range(10):
- try:
- resp = requests.get(req, headers=headers, timeout=10)
- if resp.status_code < 500:
- return resp.status_code
- print(f"get_code_with_retry: 5xx code {resp.status_code}, retrying...")
- except requests.exceptions.ConnectionError as e:
- print(f"get_code_with_retry: ConnectionError {e}, attempt {attempts+1}")
- except socket.timeout as e:
- print(f"get_code_with_retry: socket.timeout {e}, attempt {attempts+1}")
- except Exception as e:
- print(f"get_code_with_retry: generic exception {e}, attempt {attempts+1}")
- time.sleep(5)
- return 503
-
def zipkin_tracing_service_manifest():
return """
---
diff --git a/python/watch_hook.py b/python/watch_hook.py
index 113d5c02d5..7b70e9b812 100644
--- a/python/watch_hook.py
+++ b/python/watch_hook.py
@@ -26,6 +26,11 @@
ENV_AES_SECRET_NAME = "AMBASSADOR_AES_SECRET_NAME"
ENV_AES_SECRET_NAMESPACE = "AMBASSADOR_AES_SECRET_NAMESPACE"
+# the name of some env vars that can be used for overriding
+# the Cloud Connect Token resource name/namespace
+ENV_CLOUD_CONNECT_TOKEN_RESOURCE_NAME = "AGENT_CONFIG_RESOURCE_NAME"
+ENV_CLOUD_CONNECT_TOKEN_RESOURCE_NAMESPACE = "AGENT_NAMESPACE"
+DEFAULT_CLOUD_CONNECT_TOKEN_RESOURCE_NAME = "ambassador-agent-cloud-token"
# Fake SecretHandler for our fake IR, below.
@@ -147,6 +152,14 @@ def load_yaml(self, yaml_stream):
global_label_selector = os.environ.get('AMBASSADOR_LABEL_SELECTOR', '')
self.logger.debug('label-selector: %s' % global_label_selector)
+ cloud_connect_token_resource_name = os.getenv(ENV_CLOUD_CONNECT_TOKEN_RESOURCE_NAME, DEFAULT_CLOUD_CONNECT_TOKEN_RESOURCE_NAME)
+ cloud_connect_token_resource_namespace = os.getenv(ENV_CLOUD_CONNECT_TOKEN_RESOURCE_NAMESPACE, Config.ambassador_namespace)
+ self.logger.debug(f'cloud-connect-token: need configmap/secret {cloud_connect_token_resource_name}.{cloud_connect_token_resource_namespace}')
+ self.add_kube_watch(f'ConfigMap {cloud_connect_token_resource_name}', 'configmap', namespace=cloud_connect_token_resource_namespace,
+ field_selector=f"metadata.name={cloud_connect_token_resource_name}")
+ self.add_kube_watch(f'Secret {cloud_connect_token_resource_name}', 'secret', namespace=cloud_connect_token_resource_namespace,
+ field_selector=f"metadata.name={cloud_connect_token_resource_name}")
+
# watch the AES Secret if the edge stack is running
if self.fake.edge_stack_allowed:
aes_secret_name = os.getenv(ENV_AES_SECRET_NAME, DEFAULT_AES_SECRET_NAME)
diff --git a/tools/sandbox/.gitignore b/tools/sandbox/.gitignore
deleted file mode 100644
index 99f14c3551..0000000000
--- a/tools/sandbox/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-!package-lock.json
-!dist
diff --git a/tools/sandbox/grpc_auth/config/ambassador.yaml b/tools/sandbox/grpc_auth/config/ambassador.yaml
deleted file mode 100644
index 78c212ca97..0000000000
--- a/tools/sandbox/grpc_auth/config/ambassador.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-apiVersion: getambassador.io/v2
-kind: Module
-name: ambassador
-config: {}
----
-apiVersion: getambassador.io/v2
-kind: AuthService
-name: authentication
-auth_service: "auth-service:8080"
-path_prefix: "/extauth"
-proto: grpc
-allow_request_body: false
----
-apiVersion: getambassador.io/v2
-kind: Mapping
-name: echo-service
-prefix: /
-service: http://echo-service:8080
-host_rewrite: echo-service
----
-apiVersion: getambassador.io/v2
-kind: TracingService
-name: tracing
-service: "zipkin:9411"
-driver: zipkin
diff --git a/tools/sandbox/grpc_auth/docker-compose.yml.in b/tools/sandbox/grpc_auth/docker-compose.yml.in
deleted file mode 100644
index 2ce67ea236..0000000000
--- a/tools/sandbox/grpc_auth/docker-compose.yml.in
+++ /dev/null
@@ -1,59 +0,0 @@
-version: '2'
-services:
-
- # curl -v -H "requested-cookie: foo, bar, baz" -H "requested-status:307" http://localhost:61880/echo-service/get
- ambassador:
- image: docker.io/datawire/ambassador:0.73.0
- ports:
- - 61880:8080
- volumes:
- - ./config:/ambassador/ambassador-config
- environment:
- - AMBASSADOR_NO_KUBEWATCH=no_kubewatch
- networks:
- - ambassador
-
- # curl -v -H "requested-cookie: foo, bar, baz" -H "requested-status:307" http://localhost:61594/echo-service/get
- # curl -v -H "requested-status:200" -H "x-grpc-auth-append: foo=bar; baz=bar" -H "foo: foo" http://localhost:61592/headers
- auth-service:
- image: @KAT_SERVER_DOCKER_IMAGE@
- environment:
- - DEBUG=1
- - BACKEND=true
- - KAT_BACKEND_TYPE=grpc_auth
- networks:
- ambassador:
- aliases:
- - ambassador
- expose:
- - "8080"
- ports:
- - "61594:8080"
-
- echo-service:
- image: @KAT_SERVER_DOCKER_IMAGE@
- environment:
- - DEBUG=1
- - BACKEND=true
- networks:
- ambassador:
- aliases:
- - ambassador
- expose:
- - "80"
- ports:
- - "61595:80"
-
- zipkin:
- image: openzipkin/zipkin:2.17
- networks:
- ambassador:
- aliases:
- - ambassador
- expose:
- - "9411"
- ports:
- - "9411:9411"
-
-networks:
- ambassador: {}
diff --git a/tools/sandbox/grpc_auth/urls.json b/tools/sandbox/grpc_auth/urls.json
deleted file mode 100644
index c6284858a4..0000000000
--- a/tools/sandbox/grpc_auth/urls.json
+++ /dev/null
@@ -1,14 +0,0 @@
-[
- {
- "test": "AuthSetCookiet",
- "id": 1,
- "url": "http://localhost:61892/get",
- "insecure": false,
- "method": "GET",
- "headers": {
- "requested-cookie": "foo, bar, baz",
- "requested-status": "201"
- }
- }
-]
-
diff --git a/tools/sandbox/grpc_web/client.html b/tools/sandbox/grpc_web/client.html
deleted file mode 100644
index 0853664532..0000000000
--- a/tools/sandbox/grpc_web/client.html
+++ /dev/null
@@ -1,10 +0,0 @@
-
-
-
-
-Echo Client
-
-
-
-
-
diff --git a/tools/sandbox/grpc_web/client.js b/tools/sandbox/grpc_web/client.js
deleted file mode 100644
index 91520f8259..0000000000
--- a/tools/sandbox/grpc_web/client.js
+++ /dev/null
@@ -1,28 +0,0 @@
-const {EchoRequest} = require('./echo_pb.js');
-const {EchoServiceClient} = require('./echo_grpc_web_pb.js');
-
-const grpc = {};
-grpc.web = require('grpc-web');
-
-var echoService = new EchoServiceClient('http://localhost:7080', null, null);
-const request = new EchoRequest();
-
-function logMapElements(value, key, map) {
- console.log(` [ ${key} : ${value} ]`);
-}
-
-echoService.echo(request, {'requested-status': 0}, function(err, response) {
- if (err) {
- console.log("Response error code:", err.code);
- console.log("Response error message:", err.message);
- } else {
- console.log("\nRequest header map:");
- response.getRequest().getHeadersMap().forEach(logMapElements);
-
- console.log("\nResponse header map:");
- response.getResponse().getHeadersMap().forEach(logMapElements);
- }
-}).on('status', function(status) {
- console.log("\nExpected code:", 0);
- console.log("\nEcho service responded: ", status.code);
-});
diff --git a/tools/sandbox/grpc_web/config/ambassador.yaml b/tools/sandbox/grpc_web/config/ambassador.yaml
deleted file mode 100644
index ec032f1bfc..0000000000
--- a/tools/sandbox/grpc_web/config/ambassador.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-apiVersion: getambassador.io/v2
-kind: Module
-name: ambassador
-config:
- enable_grpc_web: True
----
-apiVersion: getambassador.io/v2
-kind: Mapping
-grpc: True
-prefix: /echo.EchoService/
-rewrite: /echo.EchoService/
-name: grpc_echo
-service: grpc_echo:8080
-cors:
- origins:
- - "*"
- methods:
- - GET
- - PUT
- - DELETE
- - POST
- - OPTIONS
- headers:
- - keep-alive
- - user-agent
- - cache-control
- - content-type
- - requested-status
- - content-transfer-encoding
- - x-accept-content-transfer-encoding
- - x-accept-response-streaming
- - access-control-request-headers
- - x-user-agent
- - x-grpc-web
- exposed_headers:
- - grpc-status
- - grpc-message
- max_age: "86400"
diff --git a/tools/sandbox/grpc_web/dist/bundle.js b/tools/sandbox/grpc_web/dist/bundle.js
deleted file mode 100644
index b34cc6b786..0000000000
--- a/tools/sandbox/grpc_web/dist/bundle.js
+++ /dev/null
@@ -1,212 +0,0 @@
-/******/ (function(modules) { // webpackBootstrap
-/******/ // The module cache
-/******/ var installedModules = {};
-/******/
-/******/ // The require function
-/******/ function __webpack_require__(moduleId) {
-/******/
-/******/ // Check if module is in cache
-/******/ if(installedModules[moduleId]) {
-/******/ return installedModules[moduleId].exports;
-/******/ }
-/******/ // Create a new module (and put it into the cache)
-/******/ var module = installedModules[moduleId] = {
-/******/ i: moduleId,
-/******/ l: false,
-/******/ exports: {}
-/******/ };
-/******/
-/******/ // Execute the module function
-/******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__);
-/******/
-/******/ // Flag the module as loaded
-/******/ module.l = true;
-/******/
-/******/ // Return the exports of the module
-/******/ return module.exports;
-/******/ }
-/******/
-/******/
-/******/ // expose the modules object (__webpack_modules__)
-/******/ __webpack_require__.m = modules;
-/******/
-/******/ // expose the module cache
-/******/ __webpack_require__.c = installedModules;
-/******/
-/******/ // define getter function for harmony exports
-/******/ __webpack_require__.d = function(exports, name, getter) {
-/******/ if(!__webpack_require__.o(exports, name)) {
-/******/ Object.defineProperty(exports, name, { enumerable: true, get: getter });
-/******/ }
-/******/ };
-/******/
-/******/ // define __esModule on exports
-/******/ __webpack_require__.r = function(exports) {
-/******/ if(typeof Symbol !== 'undefined' && Symbol.toStringTag) {
-/******/ Object.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });
-/******/ }
-/******/ Object.defineProperty(exports, '__esModule', { value: true });
-/******/ };
-/******/
-/******/ // create a fake namespace object
-/******/ // mode & 1: value is a module id, require it
-/******/ // mode & 2: merge all properties of value into the ns
-/******/ // mode & 4: return value when already ns object
-/******/ // mode & 8|1: behave like require
-/******/ __webpack_require__.t = function(value, mode) {
-/******/ if(mode & 1) value = __webpack_require__(value);
-/******/ if(mode & 8) return value;
-/******/ if((mode & 4) && typeof value === 'object' && value && value.__esModule) return value;
-/******/ var ns = Object.create(null);
-/******/ __webpack_require__.r(ns);
-/******/ Object.defineProperty(ns, 'default', { enumerable: true, value: value });
-/******/ if(mode & 2 && typeof value != 'string') for(var key in value) __webpack_require__.d(ns, key, function(key) { return value[key]; }.bind(null, key));
-/******/ return ns;
-/******/ };
-/******/
-/******/ // getDefaultExport function for compatibility with non-harmony modules
-/******/ __webpack_require__.n = function(module) {
-/******/ var getter = module && module.__esModule ?
-/******/ function getDefault() { return module['default']; } :
-/******/ function getModuleExports() { return module; };
-/******/ __webpack_require__.d(getter, 'a', getter);
-/******/ return getter;
-/******/ };
-/******/
-/******/ // Object.prototype.hasOwnProperty.call
-/******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); };
-/******/
-/******/ // __webpack_public_path__
-/******/ __webpack_require__.p = "";
-/******/
-/******/
-/******/ // Load entry module and return exports
-/******/ return __webpack_require__(__webpack_require__.s = 0);
-/******/ })
-/************************************************************************/
-/******/ ({
-
-/***/ "./client.js":
-/*!*******************!*\
- !*** ./client.js ***!
- \*******************/
-/*! no static exports found */
-/***/ (function(module, exports, __webpack_require__) {
-
-eval("const {EchoRequest} = __webpack_require__(/*! ./echo_pb.js */ \"./echo_pb.js\");\nconst {EchoServiceClient} = __webpack_require__(/*! ./echo_grpc_web_pb.js */ \"./echo_grpc_web_pb.js\");\n\nconst grpc = {};\ngrpc.web = __webpack_require__(/*! grpc-web */ \"./node_modules/grpc-web/index.js\");\n\nvar echoService = new EchoServiceClient('http://localhost:7080', null, null);\nconst request = new EchoRequest();\n\nfunction logMapElements(value, key, map) {\n console.log(` [ ${key} : ${value} ]`);\n}\n\nechoService.echo(request, {'requested-status': 0}, function(err, response) {\n if (err) {\n console.log(\"Response error code:\", err.code);\n console.log(\"Response error message:\", err.message);\n } else {\n console.log(\"\\nRequest header map:\");\n response.getRequest().getHeadersMap().forEach(logMapElements);\n \n console.log(\"\\nResponse header map:\");\n response.getResponse().getHeadersMap().forEach(logMapElements);\n }\n}).on('status', function(status) {\n console.log(\"\\nExpected code:\", 0);\n console.log(\"\\nEcho service responded: \", status.code);\n});\n\n\n//# sourceURL=webpack:///./client.js?");
-
-/***/ }),
-
-/***/ "./echo_grpc_web_pb.js":
-/*!*****************************!*\
- !*** ./echo_grpc_web_pb.js ***!
- \*****************************/
-/*! no static exports found */
-/***/ (function(module, exports, __webpack_require__) {
-
-eval("/**\n * @fileoverview gRPC-Web generated client stub for echo\n * @enhanceable\n * @public\n */\n\n// GENERATED CODE -- DO NOT EDIT!\n\n\n\nconst grpc = {};\ngrpc.web = __webpack_require__(/*! grpc-web */ \"./node_modules/grpc-web/index.js\");\n\nconst proto = {};\nproto.echo = __webpack_require__(/*! ./echo_pb.js */ \"./echo_pb.js\");\n\n/**\n * @param {string} hostname\n * @param {?Object} credentials\n * @param {?Object} options\n * @constructor\n * @struct\n * @final\n */\nproto.echo.EchoServiceClient =\n function(hostname, credentials, options) {\n if (!options) options = {};\n options['format'] = 'text';\n\n /**\n * @private @const {!grpc.web.GrpcWebClientBase} The client\n */\n this.client_ = new grpc.web.GrpcWebClientBase(options);\n\n /**\n * @private @const {string} The hostname\n */\n this.hostname_ = hostname;\n\n /**\n * @private @const {?Object} The credentials to be used to connect\n * to the server\n */\n this.credentials_ = credentials;\n\n /**\n * @private @const {?Object} Options for the client\n */\n this.options_ = options;\n};\n\n\n/**\n * @param {string} hostname\n * @param {?Object} credentials\n * @param {?Object} options\n * @constructor\n * @struct\n * @final\n */\nproto.echo.EchoServicePromiseClient =\n function(hostname, credentials, options) {\n if (!options) options = {};\n options['format'] = 'text';\n\n /**\n * @private @const {!proto.echo.EchoServiceClient} The delegate callback based client\n */\n this.delegateClient_ = new proto.echo.EchoServiceClient(\n hostname, credentials, options);\n\n};\n\n\n/**\n * @const\n * @type {!grpc.web.AbstractClientBase.MethodInfo<\n * !proto.echo.EchoRequest,\n * !proto.echo.EchoResponse>}\n */\nconst methodInfo_EchoService_Echo = new grpc.web.AbstractClientBase.MethodInfo(\n proto.echo.EchoResponse,\n /** @param {!proto.echo.EchoRequest} request */\n function(request) {\n return request.serializeBinary();\n },\n proto.echo.EchoResponse.deserializeBinary\n);\n\n\n/**\n * @param 
{!proto.echo.EchoRequest} request The\n * request proto\n * @param {!Object} metadata User defined\n * call metadata\n * @param {function(?grpc.web.Error, ?proto.echo.EchoResponse)}\n * callback The callback function(error, response)\n * @return {!grpc.web.ClientReadableStream|undefined}\n * The XHR Node Readable Stream\n */\nproto.echo.EchoServiceClient.prototype.echo =\n function(request, metadata, callback) {\n return this.client_.rpcCall(this.hostname_ +\n '/echo.EchoService/Echo',\n request,\n metadata,\n methodInfo_EchoService_Echo,\n callback);\n};\n\n\n/**\n * @param {!proto.echo.EchoRequest} request The\n * request proto\n * @param {!Object} metadata User defined\n * call metadata\n * @return {!Promise}\n * The XHR Node Readable Stream\n */\nproto.echo.EchoServicePromiseClient.prototype.echo =\n function(request, metadata) {\n return new Promise((resolve, reject) => {\n this.delegateClient_.echo(\n request, metadata, (error, response) => {\n error ? reject(error) : resolve(response);\n });\n });\n};\n\n\nmodule.exports = proto.echo;\n\n\n\n//# sourceURL=webpack:///./echo_grpc_web_pb.js?");
-
-/***/ }),
-
-/***/ "./echo_pb.js":
-/*!********************!*\
- !*** ./echo_pb.js ***!
- \********************/
-/*! no static exports found */
-/***/ (function(module, exports, __webpack_require__) {
-
-eval("/**\n * @fileoverview\n * @enhanceable\n * @suppress {messageConventions} JS Compiler reports an error if a variable or\n * field starts with 'MSG_' and isn't a translatable message.\n * @public\n */\n// GENERATED CODE -- DO NOT EDIT!\n\nvar jspb = __webpack_require__(/*! google-protobuf */ \"./node_modules/google-protobuf/google-protobuf.js\");\nvar goog = jspb;\nvar global = Function('return this')();\n\ngoog.exportSymbol('proto.echo.EchoRequest', null, global);\ngoog.exportSymbol('proto.echo.EchoResponse', null, global);\ngoog.exportSymbol('proto.echo.Request', null, global);\ngoog.exportSymbol('proto.echo.Response', null, global);\ngoog.exportSymbol('proto.echo.TLS', null, global);\n\n/**\n * Generated by JsPbCodeGenerator.\n * @param {Array=} opt_data Optional initial data array, typically from a\n * server response, or constructed directly in Javascript. The array is used\n * in place and becomes part of the constructed object. It is not cloned.\n * If no data is provided, the constructed object will be empty, but still\n * valid.\n * @extends {jspb.Message}\n * @constructor\n */\nproto.echo.EchoRequest = function(opt_data) {\n jspb.Message.initialize(this, opt_data, 0, -1, null, null);\n};\ngoog.inherits(proto.echo.EchoRequest, jspb.Message);\nif (goog.DEBUG && !COMPILED) {\n proto.echo.EchoRequest.displayName = 'proto.echo.EchoRequest';\n}\n\n\nif (jspb.Message.GENERATE_TO_OBJECT) {\n/**\n * Creates an object representation of this proto suitable for use in Soy templates.\n * Field names that are reserved in JavaScript and will be renamed to pb_name.\n * To access a reserved field use, foo.pb_, eg, foo.pb_default.\n * For the list of reserved names please see:\n * com.google.apps.jspb.JsClassTemplate.JS_RESERVED_WORDS.\n * @param {boolean=} opt_includeInstance Whether to include the JSPB instance\n * for transitional soy proto support: http://goto/soy-param-migration\n * @return {!Object}\n */\nproto.echo.EchoRequest.prototype.toObject = 
function(opt_includeInstance) {\n return proto.echo.EchoRequest.toObject(opt_includeInstance, this);\n};\n\n\n/**\n * Static version of the {@see toObject} method.\n * @param {boolean|undefined} includeInstance Whether to include the JSPB\n * instance for transitional soy proto support:\n * http://goto/soy-param-migration\n * @param {!proto.echo.EchoRequest} msg The msg instance to transform.\n * @return {!Object}\n * @suppress {unusedLocalVariables} f is only used for nested messages\n */\nproto.echo.EchoRequest.toObject = function(includeInstance, msg) {\n var f, obj = {\n data: jspb.Message.getFieldWithDefault(msg, 1, \"\")\n };\n\n if (includeInstance) {\n obj.$jspbMessageInstance = msg;\n }\n return obj;\n};\n}\n\n\n/**\n * Deserializes binary data (in protobuf wire format).\n * @param {jspb.ByteSource} bytes The bytes to deserialize.\n * @return {!proto.echo.EchoRequest}\n */\nproto.echo.EchoRequest.deserializeBinary = function(bytes) {\n var reader = new jspb.BinaryReader(bytes);\n var msg = new proto.echo.EchoRequest;\n return proto.echo.EchoRequest.deserializeBinaryFromReader(msg, reader);\n};\n\n\n/**\n * Deserializes binary data (in protobuf wire format) from the\n * given reader into the given message object.\n * @param {!proto.echo.EchoRequest} msg The message object to deserialize into.\n * @param {!jspb.BinaryReader} reader The BinaryReader to use.\n * @return {!proto.echo.EchoRequest}\n */\nproto.echo.EchoRequest.deserializeBinaryFromReader = function(msg, reader) {\n while (reader.nextField()) {\n if (reader.isEndGroup()) {\n break;\n }\n var field = reader.getFieldNumber();\n switch (field) {\n case 1:\n var value = /** @type {string} */ (reader.readString());\n msg.setData(value);\n break;\n default:\n reader.skipField();\n break;\n }\n }\n return msg;\n};\n\n\n/**\n * Serializes the message to binary data (in protobuf wire format).\n * @return {!Uint8Array}\n */\nproto.echo.EchoRequest.prototype.serializeBinary = function() {\n var writer = new 
jspb.BinaryWriter();\n proto.echo.EchoRequest.serializeBinaryToWriter(this, writer);\n return writer.getResultBuffer();\n};\n\n\n/**\n * Serializes the given message to binary data (in protobuf wire\n * format), writing to the given BinaryWriter.\n * @param {!proto.echo.EchoRequest} message\n * @param {!jspb.BinaryWriter} writer\n * @suppress {unusedLocalVariables} f is only used for nested messages\n */\nproto.echo.EchoRequest.serializeBinaryToWriter = function(message, writer) {\n var f = undefined;\n f = message.getData();\n if (f.length > 0) {\n writer.writeString(\n 1,\n f\n );\n }\n};\n\n\n/**\n * optional string data = 1;\n * @return {string}\n */\nproto.echo.EchoRequest.prototype.getData = function() {\n return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 1, \"\"));\n};\n\n\n/** @param {string} value */\nproto.echo.EchoRequest.prototype.setData = function(value) {\n jspb.Message.setProto3StringField(this, 1, value);\n};\n\n\n\n/**\n * Generated by JsPbCodeGenerator.\n * @param {Array=} opt_data Optional initial data array, typically from a\n * server response, or constructed directly in Javascript. The array is used\n * in place and becomes part of the constructed object. 
It is not cloned.\n * If no data is provided, the constructed object will be empty, but still\n * valid.\n * @extends {jspb.Message}\n * @constructor\n */\nproto.echo.EchoResponse = function(opt_data) {\n jspb.Message.initialize(this, opt_data, 0, -1, null, null);\n};\ngoog.inherits(proto.echo.EchoResponse, jspb.Message);\nif (goog.DEBUG && !COMPILED) {\n proto.echo.EchoResponse.displayName = 'proto.echo.EchoResponse';\n}\n\n\nif (jspb.Message.GENERATE_TO_OBJECT) {\n/**\n * Creates an object representation of this proto suitable for use in Soy templates.\n * Field names that are reserved in JavaScript and will be renamed to pb_name.\n * To access a reserved field use, foo.pb_, eg, foo.pb_default.\n * For the list of reserved names please see:\n * com.google.apps.jspb.JsClassTemplate.JS_RESERVED_WORDS.\n * @param {boolean=} opt_includeInstance Whether to include the JSPB instance\n * for transitional soy proto support: http://goto/soy-param-migration\n * @return {!Object}\n */\nproto.echo.EchoResponse.prototype.toObject = function(opt_includeInstance) {\n return proto.echo.EchoResponse.toObject(opt_includeInstance, this);\n};\n\n\n/**\n * Static version of the {@see toObject} method.\n * @param {boolean|undefined} includeInstance Whether to include the JSPB\n * instance for transitional soy proto support:\n * http://goto/soy-param-migration\n * @param {!proto.echo.EchoResponse} msg The msg instance to transform.\n * @return {!Object}\n * @suppress {unusedLocalVariables} f is only used for nested messages\n */\nproto.echo.EchoResponse.toObject = function(includeInstance, msg) {\n var f, obj = {\n backend: jspb.Message.getFieldWithDefault(msg, 1, \"\"),\n request: (f = msg.getRequest()) && proto.echo.Request.toObject(includeInstance, f),\n response: (f = msg.getResponse()) && proto.echo.Response.toObject(includeInstance, f)\n };\n\n if (includeInstance) {\n obj.$jspbMessageInstance = msg;\n }\n return obj;\n};\n}\n\n\n/**\n * Deserializes binary data (in protobuf wire 
format).\n * @param {jspb.ByteSource} bytes The bytes to deserialize.\n * @return {!proto.echo.EchoResponse}\n */\nproto.echo.EchoResponse.deserializeBinary = function(bytes) {\n var reader = new jspb.BinaryReader(bytes);\n var msg = new proto.echo.EchoResponse;\n return proto.echo.EchoResponse.deserializeBinaryFromReader(msg, reader);\n};\n\n\n/**\n * Deserializes binary data (in protobuf wire format) from the\n * given reader into the given message object.\n * @param {!proto.echo.EchoResponse} msg The message object to deserialize into.\n * @param {!jspb.BinaryReader} reader The BinaryReader to use.\n * @return {!proto.echo.EchoResponse}\n */\nproto.echo.EchoResponse.deserializeBinaryFromReader = function(msg, reader) {\n while (reader.nextField()) {\n if (reader.isEndGroup()) {\n break;\n }\n var field = reader.getFieldNumber();\n switch (field) {\n case 1:\n var value = /** @type {string} */ (reader.readString());\n msg.setBackend(value);\n break;\n case 2:\n var value = new proto.echo.Request;\n reader.readMessage(value,proto.echo.Request.deserializeBinaryFromReader);\n msg.setRequest(value);\n break;\n case 3:\n var value = new proto.echo.Response;\n reader.readMessage(value,proto.echo.Response.deserializeBinaryFromReader);\n msg.setResponse(value);\n break;\n default:\n reader.skipField();\n break;\n }\n }\n return msg;\n};\n\n\n/**\n * Serializes the message to binary data (in protobuf wire format).\n * @return {!Uint8Array}\n */\nproto.echo.EchoResponse.prototype.serializeBinary = function() {\n var writer = new jspb.BinaryWriter();\n proto.echo.EchoResponse.serializeBinaryToWriter(this, writer);\n return writer.getResultBuffer();\n};\n\n\n/**\n * Serializes the given message to binary data (in protobuf wire\n * format), writing to the given BinaryWriter.\n * @param {!proto.echo.EchoResponse} message\n * @param {!jspb.BinaryWriter} writer\n * @suppress {unusedLocalVariables} f is only used for nested messages\n 
*/\nproto.echo.EchoResponse.serializeBinaryToWriter = function(message, writer) {\n var f = undefined;\n f = message.getBackend();\n if (f.length > 0) {\n writer.writeString(\n 1,\n f\n );\n }\n f = message.getRequest();\n if (f != null) {\n writer.writeMessage(\n 2,\n f,\n proto.echo.Request.serializeBinaryToWriter\n );\n }\n f = message.getResponse();\n if (f != null) {\n writer.writeMessage(\n 3,\n f,\n proto.echo.Response.serializeBinaryToWriter\n );\n }\n};\n\n\n/**\n * optional string backend = 1;\n * @return {string}\n */\nproto.echo.EchoResponse.prototype.getBackend = function() {\n return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 1, \"\"));\n};\n\n\n/** @param {string} value */\nproto.echo.EchoResponse.prototype.setBackend = function(value) {\n jspb.Message.setProto3StringField(this, 1, value);\n};\n\n\n/**\n * optional Request request = 2;\n * @return {?proto.echo.Request}\n */\nproto.echo.EchoResponse.prototype.getRequest = function() {\n return /** @type{?proto.echo.Request} */ (\n jspb.Message.getWrapperField(this, proto.echo.Request, 2));\n};\n\n\n/** @param {?proto.echo.Request|undefined} value */\nproto.echo.EchoResponse.prototype.setRequest = function(value) {\n jspb.Message.setWrapperField(this, 2, value);\n};\n\n\nproto.echo.EchoResponse.prototype.clearRequest = function() {\n this.setRequest(undefined);\n};\n\n\n/**\n * Returns whether this field is set.\n * @return {!boolean}\n */\nproto.echo.EchoResponse.prototype.hasRequest = function() {\n return jspb.Message.getField(this, 2) != null;\n};\n\n\n/**\n * optional Response response = 3;\n * @return {?proto.echo.Response}\n */\nproto.echo.EchoResponse.prototype.getResponse = function() {\n return /** @type{?proto.echo.Response} */ (\n jspb.Message.getWrapperField(this, proto.echo.Response, 3));\n};\n\n\n/** @param {?proto.echo.Response|undefined} value */\nproto.echo.EchoResponse.prototype.setResponse = function(value) {\n jspb.Message.setWrapperField(this, 3, 
value);\n};\n\n\nproto.echo.EchoResponse.prototype.clearResponse = function() {\n this.setResponse(undefined);\n};\n\n\n/**\n * Returns whether this field is set.\n * @return {!boolean}\n */\nproto.echo.EchoResponse.prototype.hasResponse = function() {\n return jspb.Message.getField(this, 3) != null;\n};\n\n\n\n/**\n * Generated by JsPbCodeGenerator.\n * @param {Array=} opt_data Optional initial data array, typically from a\n * server response, or constructed directly in Javascript. The array is used\n * in place and becomes part of the constructed object. It is not cloned.\n * If no data is provided, the constructed object will be empty, but still\n * valid.\n * @extends {jspb.Message}\n * @constructor\n */\nproto.echo.Response = function(opt_data) {\n jspb.Message.initialize(this, opt_data, 0, -1, null, null);\n};\ngoog.inherits(proto.echo.Response, jspb.Message);\nif (goog.DEBUG && !COMPILED) {\n proto.echo.Response.displayName = 'proto.echo.Response';\n}\n\n\nif (jspb.Message.GENERATE_TO_OBJECT) {\n/**\n * Creates an object representation of this proto suitable for use in Soy templates.\n * Field names that are reserved in JavaScript and will be renamed to pb_name.\n * To access a reserved field use, foo.pb_, eg, foo.pb_default.\n * For the list of reserved names please see:\n * com.google.apps.jspb.JsClassTemplate.JS_RESERVED_WORDS.\n * @param {boolean=} opt_includeInstance Whether to include the JSPB instance\n * for transitional soy proto support: http://goto/soy-param-migration\n * @return {!Object}\n */\nproto.echo.Response.prototype.toObject = function(opt_includeInstance) {\n return proto.echo.Response.toObject(opt_includeInstance, this);\n};\n\n\n/**\n * Static version of the {@see toObject} method.\n * @param {boolean|undefined} includeInstance Whether to include the JSPB\n * instance for transitional soy proto support:\n * http://goto/soy-param-migration\n * @param {!proto.echo.Response} msg The msg instance to transform.\n * @return {!Object}\n * 
@suppress {unusedLocalVariables} f is only used for nested messages\n */\nproto.echo.Response.toObject = function(includeInstance, msg) {\n var f, obj = {\n headersMap: (f = msg.getHeadersMap()) ? f.toObject(includeInstance, undefined) : []\n };\n\n if (includeInstance) {\n obj.$jspbMessageInstance = msg;\n }\n return obj;\n};\n}\n\n\n/**\n * Deserializes binary data (in protobuf wire format).\n * @param {jspb.ByteSource} bytes The bytes to deserialize.\n * @return {!proto.echo.Response}\n */\nproto.echo.Response.deserializeBinary = function(bytes) {\n var reader = new jspb.BinaryReader(bytes);\n var msg = new proto.echo.Response;\n return proto.echo.Response.deserializeBinaryFromReader(msg, reader);\n};\n\n\n/**\n * Deserializes binary data (in protobuf wire format) from the\n * given reader into the given message object.\n * @param {!proto.echo.Response} msg The message object to deserialize into.\n * @param {!jspb.BinaryReader} reader The BinaryReader to use.\n * @return {!proto.echo.Response}\n */\nproto.echo.Response.deserializeBinaryFromReader = function(msg, reader) {\n while (reader.nextField()) {\n if (reader.isEndGroup()) {\n break;\n }\n var field = reader.getFieldNumber();\n switch (field) {\n case 1:\n var value = msg.getHeadersMap();\n reader.readMessage(value, function(message, reader) {\n jspb.Map.deserializeBinary(message, reader, jspb.BinaryReader.prototype.readString, jspb.BinaryReader.prototype.readString);\n });\n break;\n default:\n reader.skipField();\n break;\n }\n }\n return msg;\n};\n\n\n/**\n * Serializes the message to binary data (in protobuf wire format).\n * @return {!Uint8Array}\n */\nproto.echo.Response.prototype.serializeBinary = function() {\n var writer = new jspb.BinaryWriter();\n proto.echo.Response.serializeBinaryToWriter(this, writer);\n return writer.getResultBuffer();\n};\n\n\n/**\n * Serializes the given message to binary data (in protobuf wire\n * format), writing to the given BinaryWriter.\n * @param 
{!proto.echo.Response} message\n * @param {!jspb.BinaryWriter} writer\n * @suppress {unusedLocalVariables} f is only used for nested messages\n */\nproto.echo.Response.serializeBinaryToWriter = function(message, writer) {\n var f = undefined;\n f = message.getHeadersMap(true);\n if (f && f.getLength() > 0) {\n f.serializeBinary(1, writer, jspb.BinaryWriter.prototype.writeString, jspb.BinaryWriter.prototype.writeString);\n }\n};\n\n\n/**\n * map headers = 1;\n * @param {boolean=} opt_noLazyCreate Do not create the map if\n * empty, instead returning `undefined`\n * @return {!jspb.Map}\n */\nproto.echo.Response.prototype.getHeadersMap = function(opt_noLazyCreate) {\n return /** @type {!jspb.Map} */ (\n jspb.Message.getMapField(this, 1, opt_noLazyCreate,\n null));\n};\n\n\nproto.echo.Response.prototype.clearHeadersMap = function() {\n this.getHeadersMap().clear();\n};\n\n\n\n/**\n * Generated by JsPbCodeGenerator.\n * @param {Array=} opt_data Optional initial data array, typically from a\n * server response, or constructed directly in Javascript. The array is used\n * in place and becomes part of the constructed object. 
It is not cloned.\n * If no data is provided, the constructed object will be empty, but still\n * valid.\n * @extends {jspb.Message}\n * @constructor\n */\nproto.echo.Request = function(opt_data) {\n jspb.Message.initialize(this, opt_data, 0, -1, null, null);\n};\ngoog.inherits(proto.echo.Request, jspb.Message);\nif (goog.DEBUG && !COMPILED) {\n proto.echo.Request.displayName = 'proto.echo.Request';\n}\n\n\nif (jspb.Message.GENERATE_TO_OBJECT) {\n/**\n * Creates an object representation of this proto suitable for use in Soy templates.\n * Field names that are reserved in JavaScript and will be renamed to pb_name.\n * To access a reserved field use, foo.pb_, eg, foo.pb_default.\n * For the list of reserved names please see:\n * com.google.apps.jspb.JsClassTemplate.JS_RESERVED_WORDS.\n * @param {boolean=} opt_includeInstance Whether to include the JSPB instance\n * for transitional soy proto support: http://goto/soy-param-migration\n * @return {!Object}\n */\nproto.echo.Request.prototype.toObject = function(opt_includeInstance) {\n return proto.echo.Request.toObject(opt_includeInstance, this);\n};\n\n\n/**\n * Static version of the {@see toObject} method.\n * @param {boolean|undefined} includeInstance Whether to include the JSPB\n * instance for transitional soy proto support:\n * http://goto/soy-param-migration\n * @param {!proto.echo.Request} msg The msg instance to transform.\n * @return {!Object}\n * @suppress {unusedLocalVariables} f is only used for nested messages\n */\nproto.echo.Request.toObject = function(includeInstance, msg) {\n var f, obj = {\n headersMap: (f = msg.getHeadersMap()) ? 
f.toObject(includeInstance, undefined) : [],\n tls: (f = msg.getTls()) && proto.echo.TLS.toObject(includeInstance, f)\n };\n\n if (includeInstance) {\n obj.$jspbMessageInstance = msg;\n }\n return obj;\n};\n}\n\n\n/**\n * Deserializes binary data (in protobuf wire format).\n * @param {jspb.ByteSource} bytes The bytes to deserialize.\n * @return {!proto.echo.Request}\n */\nproto.echo.Request.deserializeBinary = function(bytes) {\n var reader = new jspb.BinaryReader(bytes);\n var msg = new proto.echo.Request;\n return proto.echo.Request.deserializeBinaryFromReader(msg, reader);\n};\n\n\n/**\n * Deserializes binary data (in protobuf wire format) from the\n * given reader into the given message object.\n * @param {!proto.echo.Request} msg The message object to deserialize into.\n * @param {!jspb.BinaryReader} reader The BinaryReader to use.\n * @return {!proto.echo.Request}\n */\nproto.echo.Request.deserializeBinaryFromReader = function(msg, reader) {\n while (reader.nextField()) {\n if (reader.isEndGroup()) {\n break;\n }\n var field = reader.getFieldNumber();\n switch (field) {\n case 1:\n var value = msg.getHeadersMap();\n reader.readMessage(value, function(message, reader) {\n jspb.Map.deserializeBinary(message, reader, jspb.BinaryReader.prototype.readString, jspb.BinaryReader.prototype.readString);\n });\n break;\n case 2:\n var value = new proto.echo.TLS;\n reader.readMessage(value,proto.echo.TLS.deserializeBinaryFromReader);\n msg.setTls(value);\n break;\n default:\n reader.skipField();\n break;\n }\n }\n return msg;\n};\n\n\n/**\n * Serializes the message to binary data (in protobuf wire format).\n * @return {!Uint8Array}\n */\nproto.echo.Request.prototype.serializeBinary = function() {\n var writer = new jspb.BinaryWriter();\n proto.echo.Request.serializeBinaryToWriter(this, writer);\n return writer.getResultBuffer();\n};\n\n\n/**\n * Serializes the given message to binary data (in protobuf wire\n * format), writing to the given BinaryWriter.\n * @param 
{!proto.echo.Request} message\n * @param {!jspb.BinaryWriter} writer\n * @suppress {unusedLocalVariables} f is only used for nested messages\n */\nproto.echo.Request.serializeBinaryToWriter = function(message, writer) {\n var f = undefined;\n f = message.getHeadersMap(true);\n if (f && f.getLength() > 0) {\n f.serializeBinary(1, writer, jspb.BinaryWriter.prototype.writeString, jspb.BinaryWriter.prototype.writeString);\n }\n f = message.getTls();\n if (f != null) {\n writer.writeMessage(\n 2,\n f,\n proto.echo.TLS.serializeBinaryToWriter\n );\n }\n};\n\n\n/**\n * map headers = 1;\n * @param {boolean=} opt_noLazyCreate Do not create the map if\n * empty, instead returning `undefined`\n * @return {!jspb.Map}\n */\nproto.echo.Request.prototype.getHeadersMap = function(opt_noLazyCreate) {\n return /** @type {!jspb.Map} */ (\n jspb.Message.getMapField(this, 1, opt_noLazyCreate,\n null));\n};\n\n\nproto.echo.Request.prototype.clearHeadersMap = function() {\n this.getHeadersMap().clear();\n};\n\n\n/**\n * optional TLS tls = 2;\n * @return {?proto.echo.TLS}\n */\nproto.echo.Request.prototype.getTls = function() {\n return /** @type{?proto.echo.TLS} */ (\n jspb.Message.getWrapperField(this, proto.echo.TLS, 2));\n};\n\n\n/** @param {?proto.echo.TLS|undefined} value */\nproto.echo.Request.prototype.setTls = function(value) {\n jspb.Message.setWrapperField(this, 2, value);\n};\n\n\nproto.echo.Request.prototype.clearTls = function() {\n this.setTls(undefined);\n};\n\n\n/**\n * Returns whether this field is set.\n * @return {!boolean}\n */\nproto.echo.Request.prototype.hasTls = function() {\n return jspb.Message.getField(this, 2) != null;\n};\n\n\n\n/**\n * Generated by JsPbCodeGenerator.\n * @param {Array=} opt_data Optional initial data array, typically from a\n * server response, or constructed directly in Javascript. The array is used\n * in place and becomes part of the constructed object. 
It is not cloned.\n * If no data is provided, the constructed object will be empty, but still\n * valid.\n * @extends {jspb.Message}\n * @constructor\n */\nproto.echo.TLS = function(opt_data) {\n jspb.Message.initialize(this, opt_data, 0, -1, null, null);\n};\ngoog.inherits(proto.echo.TLS, jspb.Message);\nif (goog.DEBUG && !COMPILED) {\n proto.echo.TLS.displayName = 'proto.echo.TLS';\n}\n\n\nif (jspb.Message.GENERATE_TO_OBJECT) {\n/**\n * Creates an object representation of this proto suitable for use in Soy templates.\n * Field names that are reserved in JavaScript and will be renamed to pb_name.\n * To access a reserved field use, foo.pb_, eg, foo.pb_default.\n * For the list of reserved names please see:\n * com.google.apps.jspb.JsClassTemplate.JS_RESERVED_WORDS.\n * @param {boolean=} opt_includeInstance Whether to include the JSPB instance\n * for transitional soy proto support: http://goto/soy-param-migration\n * @return {!Object}\n */\nproto.echo.TLS.prototype.toObject = function(opt_includeInstance) {\n return proto.echo.TLS.toObject(opt_includeInstance, this);\n};\n\n\n/**\n * Static version of the {@see toObject} method.\n * @param {boolean|undefined} includeInstance Whether to include the JSPB\n * instance for transitional soy proto support:\n * http://goto/soy-param-migration\n * @param {!proto.echo.TLS} msg The msg instance to transform.\n * @return {!Object}\n * @suppress {unusedLocalVariables} f is only used for nested messages\n */\nproto.echo.TLS.toObject = function(includeInstance, msg) {\n var f, obj = {\n enabled: jspb.Message.getFieldWithDefault(msg, 1, false)\n };\n\n if (includeInstance) {\n obj.$jspbMessageInstance = msg;\n }\n return obj;\n};\n}\n\n\n/**\n * Deserializes binary data (in protobuf wire format).\n * @param {jspb.ByteSource} bytes The bytes to deserialize.\n * @return {!proto.echo.TLS}\n */\nproto.echo.TLS.deserializeBinary = function(bytes) {\n var reader = new jspb.BinaryReader(bytes);\n var msg = new proto.echo.TLS;\n return 
proto.echo.TLS.deserializeBinaryFromReader(msg, reader);\n};\n\n\n/**\n * Deserializes binary data (in protobuf wire format) from the\n * given reader into the given message object.\n * @param {!proto.echo.TLS} msg The message object to deserialize into.\n * @param {!jspb.BinaryReader} reader The BinaryReader to use.\n * @return {!proto.echo.TLS}\n */\nproto.echo.TLS.deserializeBinaryFromReader = function(msg, reader) {\n while (reader.nextField()) {\n if (reader.isEndGroup()) {\n break;\n }\n var field = reader.getFieldNumber();\n switch (field) {\n case 1:\n var value = /** @type {boolean} */ (reader.readBool());\n msg.setEnabled(value);\n break;\n default:\n reader.skipField();\n break;\n }\n }\n return msg;\n};\n\n\n/**\n * Serializes the message to binary data (in protobuf wire format).\n * @return {!Uint8Array}\n */\nproto.echo.TLS.prototype.serializeBinary = function() {\n var writer = new jspb.BinaryWriter();\n proto.echo.TLS.serializeBinaryToWriter(this, writer);\n return writer.getResultBuffer();\n};\n\n\n/**\n * Serializes the given message to binary data (in protobuf wire\n * format), writing to the given BinaryWriter.\n * @param {!proto.echo.TLS} message\n * @param {!jspb.BinaryWriter} writer\n * @suppress {unusedLocalVariables} f is only used for nested messages\n */\nproto.echo.TLS.serializeBinaryToWriter = function(message, writer) {\n var f = undefined;\n f = message.getEnabled();\n if (f) {\n writer.writeBool(\n 1,\n f\n );\n }\n};\n\n\n/**\n * optional bool enabled = 1;\n * Note that Boolean fields may be set to 0/1 when serialized from a Java server.\n * You should avoid comparisons like {@code val === true/false} in those cases.\n * @return {boolean}\n */\nproto.echo.TLS.prototype.getEnabled = function() {\n return /** @type {boolean} */ (jspb.Message.getFieldWithDefault(this, 1, false));\n};\n\n\n/** @param {boolean} value */\nproto.echo.TLS.prototype.setEnabled = function(value) {\n jspb.Message.setProto3BooleanField(this, 1, 
value);\n};\n\n\ngoog.object.extend(exports, proto.echo);\n\n\n//# sourceURL=webpack:///./echo_pb.js?");
-
-/***/ }),
-
-/***/ "./node_modules/base64-js/index.js":
-/*!*****************************************!*\
- !*** ./node_modules/base64-js/index.js ***!
- \*****************************************/
-/*! no static exports found */
-/***/ (function(module, exports, __webpack_require__) {
-
-"use strict";
-eval("\n\nexports.byteLength = byteLength\nexports.toByteArray = toByteArray\nexports.fromByteArray = fromByteArray\n\nvar lookup = []\nvar revLookup = []\nvar Arr = typeof Uint8Array !== 'undefined' ? Uint8Array : Array\n\nvar code = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'\nfor (var i = 0, len = code.length; i < len; ++i) {\n lookup[i] = code[i]\n revLookup[code.charCodeAt(i)] = i\n}\n\n// Support decoding URL-safe base64 strings, as Node.js does.\n// See: https://en.wikipedia.org/wiki/Base64#URL_applications\nrevLookup['-'.charCodeAt(0)] = 62\nrevLookup['_'.charCodeAt(0)] = 63\n\nfunction getLens (b64) {\n var len = b64.length\n\n if (len % 4 > 0) {\n throw new Error('Invalid string. Length must be a multiple of 4')\n }\n\n // Trim off extra bytes after placeholder bytes are found\n // See: https://github.com/beatgammit/base64-js/issues/42\n var validLen = b64.indexOf('=')\n if (validLen === -1) validLen = len\n\n var placeHoldersLen = validLen === len\n ? 0\n : 4 - (validLen % 4)\n\n return [validLen, placeHoldersLen]\n}\n\n// base64 is 4/3 + up to two characters of the original data\nfunction byteLength (b64) {\n var lens = getLens(b64)\n var validLen = lens[0]\n var placeHoldersLen = lens[1]\n return ((validLen + placeHoldersLen) * 3 / 4) - placeHoldersLen\n}\n\nfunction _byteLength (b64, validLen, placeHoldersLen) {\n return ((validLen + placeHoldersLen) * 3 / 4) - placeHoldersLen\n}\n\nfunction toByteArray (b64) {\n var tmp\n var lens = getLens(b64)\n var validLen = lens[0]\n var placeHoldersLen = lens[1]\n\n var arr = new Arr(_byteLength(b64, validLen, placeHoldersLen))\n\n var curByte = 0\n\n // if there are placeholders, only get up to the last complete 4 chars\n var len = placeHoldersLen > 0\n ? 
validLen - 4\n : validLen\n\n for (var i = 0; i < len; i += 4) {\n tmp =\n (revLookup[b64.charCodeAt(i)] << 18) |\n (revLookup[b64.charCodeAt(i + 1)] << 12) |\n (revLookup[b64.charCodeAt(i + 2)] << 6) |\n revLookup[b64.charCodeAt(i + 3)]\n arr[curByte++] = (tmp >> 16) & 0xFF\n arr[curByte++] = (tmp >> 8) & 0xFF\n arr[curByte++] = tmp & 0xFF\n }\n\n if (placeHoldersLen === 2) {\n tmp =\n (revLookup[b64.charCodeAt(i)] << 2) |\n (revLookup[b64.charCodeAt(i + 1)] >> 4)\n arr[curByte++] = tmp & 0xFF\n }\n\n if (placeHoldersLen === 1) {\n tmp =\n (revLookup[b64.charCodeAt(i)] << 10) |\n (revLookup[b64.charCodeAt(i + 1)] << 4) |\n (revLookup[b64.charCodeAt(i + 2)] >> 2)\n arr[curByte++] = (tmp >> 8) & 0xFF\n arr[curByte++] = tmp & 0xFF\n }\n\n return arr\n}\n\nfunction tripletToBase64 (num) {\n return lookup[num >> 18 & 0x3F] +\n lookup[num >> 12 & 0x3F] +\n lookup[num >> 6 & 0x3F] +\n lookup[num & 0x3F]\n}\n\nfunction encodeChunk (uint8, start, end) {\n var tmp\n var output = []\n for (var i = start; i < end; i += 3) {\n tmp =\n ((uint8[i] << 16) & 0xFF0000) +\n ((uint8[i + 1] << 8) & 0xFF00) +\n (uint8[i + 2] & 0xFF)\n output.push(tripletToBase64(tmp))\n }\n return output.join('')\n}\n\nfunction fromByteArray (uint8) {\n var tmp\n var len = uint8.length\n var extraBytes = len % 3 // if we have 1 byte left, pad 2 bytes\n var parts = []\n var maxChunkLength = 16383 // must be multiple of 3\n\n // go through the array every three bytes, we'll deal with trailing stuff later\n for (var i = 0, len2 = len - extraBytes; i < len2; i += maxChunkLength) {\n parts.push(encodeChunk(\n uint8, i, (i + maxChunkLength) > len2 ? 
len2 : (i + maxChunkLength)\n ))\n }\n\n // pad the end with zeros, but make sure to not forget the extra bytes\n if (extraBytes === 1) {\n tmp = uint8[len - 1]\n parts.push(\n lookup[tmp >> 2] +\n lookup[(tmp << 4) & 0x3F] +\n '=='\n )\n } else if (extraBytes === 2) {\n tmp = (uint8[len - 2] << 8) + uint8[len - 1]\n parts.push(\n lookup[tmp >> 10] +\n lookup[(tmp >> 4) & 0x3F] +\n lookup[(tmp << 2) & 0x3F] +\n '='\n )\n }\n\n return parts.join('')\n}\n\n\n//# sourceURL=webpack:///./node_modules/base64-js/index.js?");
-
-/***/ }),
-
-/***/ "./node_modules/buffer/index.js":
-/*!**************************************!*\
- !*** ./node_modules/buffer/index.js ***!
- \**************************************/
-/*! no static exports found */
-/***/ (function(module, exports, __webpack_require__) {
-
-"use strict";
-eval("/* WEBPACK VAR INJECTION */(function(global) {/*!\n * The buffer module from node.js, for the browser.\n *\n * @author Feross Aboukhadijeh \n * @license MIT\n */\n/* eslint-disable no-proto */\n\n\n\nvar base64 = __webpack_require__(/*! base64-js */ \"./node_modules/base64-js/index.js\")\nvar ieee754 = __webpack_require__(/*! ieee754 */ \"./node_modules/ieee754/index.js\")\nvar isArray = __webpack_require__(/*! isarray */ \"./node_modules/isarray/index.js\")\n\nexports.Buffer = Buffer\nexports.SlowBuffer = SlowBuffer\nexports.INSPECT_MAX_BYTES = 50\n\n/**\n * If `Buffer.TYPED_ARRAY_SUPPORT`:\n * === true Use Uint8Array implementation (fastest)\n * === false Use Object implementation (most compatible, even IE6)\n *\n * Browsers that support typed arrays are IE 10+, Firefox 4+, Chrome 7+, Safari 5.1+,\n * Opera 11.6+, iOS 4.2+.\n *\n * Due to various browser bugs, sometimes the Object implementation will be used even\n * when the browser supports typed arrays.\n *\n * Note:\n *\n * - Firefox 4-29 lacks support for adding new properties to `Uint8Array` instances,\n * See: https://bugzilla.mozilla.org/show_bug.cgi?id=695438.\n *\n * - Chrome 9-10 is missing the `TypedArray.prototype.subarray` function.\n *\n * - IE10 has a broken `TypedArray.prototype.subarray` function which returns arrays of\n * incorrect length in some situations.\n\n * We detect these buggy browsers and set `Buffer.TYPED_ARRAY_SUPPORT` to `false` so they\n * get the Object implementation, which is slower but behaves correctly.\n */\nBuffer.TYPED_ARRAY_SUPPORT = global.TYPED_ARRAY_SUPPORT !== undefined\n ? 
global.TYPED_ARRAY_SUPPORT\n : typedArraySupport()\n\n/*\n * Export kMaxLength after typed array support is determined.\n */\nexports.kMaxLength = kMaxLength()\n\nfunction typedArraySupport () {\n try {\n var arr = new Uint8Array(1)\n arr.__proto__ = {__proto__: Uint8Array.prototype, foo: function () { return 42 }}\n return arr.foo() === 42 && // typed array instances can be augmented\n typeof arr.subarray === 'function' && // chrome 9-10 lack `subarray`\n arr.subarray(1, 1).byteLength === 0 // ie10 has broken `subarray`\n } catch (e) {\n return false\n }\n}\n\nfunction kMaxLength () {\n return Buffer.TYPED_ARRAY_SUPPORT\n ? 0x7fffffff\n : 0x3fffffff\n}\n\nfunction createBuffer (that, length) {\n if (kMaxLength() < length) {\n throw new RangeError('Invalid typed array length')\n }\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n // Return an augmented `Uint8Array` instance, for best performance\n that = new Uint8Array(length)\n that.__proto__ = Buffer.prototype\n } else {\n // Fallback: Return an object instance of the Buffer class\n if (that === null) {\n that = new Buffer(length)\n }\n that.length = length\n }\n\n return that\n}\n\n/**\n * The Buffer constructor returns instances of `Uint8Array` that have their\n * prototype changed to `Buffer.prototype`. Furthermore, `Buffer` is a subclass of\n * `Uint8Array`, so the returned instances will have all the node `Buffer` methods\n * and the `Uint8Array` methods. 
Square bracket notation works as expected -- it\n * returns a single octet.\n *\n * The `Uint8Array` prototype remains unmodified.\n */\n\nfunction Buffer (arg, encodingOrOffset, length) {\n if (!Buffer.TYPED_ARRAY_SUPPORT && !(this instanceof Buffer)) {\n return new Buffer(arg, encodingOrOffset, length)\n }\n\n // Common case.\n if (typeof arg === 'number') {\n if (typeof encodingOrOffset === 'string') {\n throw new Error(\n 'If encoding is specified then the first argument must be a string'\n )\n }\n return allocUnsafe(this, arg)\n }\n return from(this, arg, encodingOrOffset, length)\n}\n\nBuffer.poolSize = 8192 // not used by this implementation\n\n// TODO: Legacy, not needed anymore. Remove in next major version.\nBuffer._augment = function (arr) {\n arr.__proto__ = Buffer.prototype\n return arr\n}\n\nfunction from (that, value, encodingOrOffset, length) {\n if (typeof value === 'number') {\n throw new TypeError('\"value\" argument must not be a number')\n }\n\n if (typeof ArrayBuffer !== 'undefined' && value instanceof ArrayBuffer) {\n return fromArrayBuffer(that, value, encodingOrOffset, length)\n }\n\n if (typeof value === 'string') {\n return fromString(that, value, encodingOrOffset)\n }\n\n return fromObject(that, value)\n}\n\n/**\n * Functionally equivalent to Buffer(arg, encoding) but throws a TypeError\n * if value is a number.\n * Buffer.from(str[, encoding])\n * Buffer.from(array)\n * Buffer.from(buffer)\n * Buffer.from(arrayBuffer[, byteOffset[, length]])\n **/\nBuffer.from = function (value, encodingOrOffset, length) {\n return from(null, value, encodingOrOffset, length)\n}\n\nif (Buffer.TYPED_ARRAY_SUPPORT) {\n Buffer.prototype.__proto__ = Uint8Array.prototype\n Buffer.__proto__ = Uint8Array\n if (typeof Symbol !== 'undefined' && Symbol.species &&\n Buffer[Symbol.species] === Buffer) {\n // Fix subarray() in ES2016. 
See: https://github.com/feross/buffer/pull/97\n Object.defineProperty(Buffer, Symbol.species, {\n value: null,\n configurable: true\n })\n }\n}\n\nfunction assertSize (size) {\n if (typeof size !== 'number') {\n throw new TypeError('\"size\" argument must be a number')\n } else if (size < 0) {\n throw new RangeError('\"size\" argument must not be negative')\n }\n}\n\nfunction alloc (that, size, fill, encoding) {\n assertSize(size)\n if (size <= 0) {\n return createBuffer(that, size)\n }\n if (fill !== undefined) {\n // Only pay attention to encoding if it's a string. This\n // prevents accidentally sending in a number that would\n // be interpretted as a start offset.\n return typeof encoding === 'string'\n ? createBuffer(that, size).fill(fill, encoding)\n : createBuffer(that, size).fill(fill)\n }\n return createBuffer(that, size)\n}\n\n/**\n * Creates a new filled Buffer instance.\n * alloc(size[, fill[, encoding]])\n **/\nBuffer.alloc = function (size, fill, encoding) {\n return alloc(null, size, fill, encoding)\n}\n\nfunction allocUnsafe (that, size) {\n assertSize(size)\n that = createBuffer(that, size < 0 ? 
0 : checked(size) | 0)\n if (!Buffer.TYPED_ARRAY_SUPPORT) {\n for (var i = 0; i < size; ++i) {\n that[i] = 0\n }\n }\n return that\n}\n\n/**\n * Equivalent to Buffer(num), by default creates a non-zero-filled Buffer instance.\n * */\nBuffer.allocUnsafe = function (size) {\n return allocUnsafe(null, size)\n}\n/**\n * Equivalent to SlowBuffer(num), by default creates a non-zero-filled Buffer instance.\n */\nBuffer.allocUnsafeSlow = function (size) {\n return allocUnsafe(null, size)\n}\n\nfunction fromString (that, string, encoding) {\n if (typeof encoding !== 'string' || encoding === '') {\n encoding = 'utf8'\n }\n\n if (!Buffer.isEncoding(encoding)) {\n throw new TypeError('\"encoding\" must be a valid string encoding')\n }\n\n var length = byteLength(string, encoding) | 0\n that = createBuffer(that, length)\n\n var actual = that.write(string, encoding)\n\n if (actual !== length) {\n // Writing a hex string, for example, that contains invalid characters will\n // cause everything after the first invalid character to be ignored. (e.g.\n // 'abxxcd' will be treated as 'ab')\n that = that.slice(0, actual)\n }\n\n return that\n}\n\nfunction fromArrayLike (that, array) {\n var length = array.length < 0 ? 
0 : checked(array.length) | 0\n that = createBuffer(that, length)\n for (var i = 0; i < length; i += 1) {\n that[i] = array[i] & 255\n }\n return that\n}\n\nfunction fromArrayBuffer (that, array, byteOffset, length) {\n array.byteLength // this throws if `array` is not a valid ArrayBuffer\n\n if (byteOffset < 0 || array.byteLength < byteOffset) {\n throw new RangeError('\\'offset\\' is out of bounds')\n }\n\n if (array.byteLength < byteOffset + (length || 0)) {\n throw new RangeError('\\'length\\' is out of bounds')\n }\n\n if (byteOffset === undefined && length === undefined) {\n array = new Uint8Array(array)\n } else if (length === undefined) {\n array = new Uint8Array(array, byteOffset)\n } else {\n array = new Uint8Array(array, byteOffset, length)\n }\n\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n // Return an augmented `Uint8Array` instance, for best performance\n that = array\n that.__proto__ = Buffer.prototype\n } else {\n // Fallback: Return an object instance of the Buffer class\n that = fromArrayLike(that, array)\n }\n return that\n}\n\nfunction fromObject (that, obj) {\n if (Buffer.isBuffer(obj)) {\n var len = checked(obj.length) | 0\n that = createBuffer(that, len)\n\n if (that.length === 0) {\n return that\n }\n\n obj.copy(that, 0, 0, len)\n return that\n }\n\n if (obj) {\n if ((typeof ArrayBuffer !== 'undefined' &&\n obj.buffer instanceof ArrayBuffer) || 'length' in obj) {\n if (typeof obj.length !== 'number' || isnan(obj.length)) {\n return createBuffer(that, 0)\n }\n return fromArrayLike(that, obj)\n }\n\n if (obj.type === 'Buffer' && isArray(obj.data)) {\n return fromArrayLike(that, obj.data)\n }\n }\n\n throw new TypeError('First argument must be a string, Buffer, ArrayBuffer, Array, or array-like object.')\n}\n\nfunction checked (length) {\n // Note: cannot use `length < kMaxLength()` here because that fails when\n // length is NaN (which is otherwise coerced to zero.)\n if (length >= kMaxLength()) {\n throw new RangeError('Attempt to allocate Buffer 
larger than maximum ' +\n 'size: 0x' + kMaxLength().toString(16) + ' bytes')\n }\n return length | 0\n}\n\nfunction SlowBuffer (length) {\n if (+length != length) { // eslint-disable-line eqeqeq\n length = 0\n }\n return Buffer.alloc(+length)\n}\n\nBuffer.isBuffer = function isBuffer (b) {\n return !!(b != null && b._isBuffer)\n}\n\nBuffer.compare = function compare (a, b) {\n if (!Buffer.isBuffer(a) || !Buffer.isBuffer(b)) {\n throw new TypeError('Arguments must be Buffers')\n }\n\n if (a === b) return 0\n\n var x = a.length\n var y = b.length\n\n for (var i = 0, len = Math.min(x, y); i < len; ++i) {\n if (a[i] !== b[i]) {\n x = a[i]\n y = b[i]\n break\n }\n }\n\n if (x < y) return -1\n if (y < x) return 1\n return 0\n}\n\nBuffer.isEncoding = function isEncoding (encoding) {\n switch (String(encoding).toLowerCase()) {\n case 'hex':\n case 'utf8':\n case 'utf-8':\n case 'ascii':\n case 'latin1':\n case 'binary':\n case 'base64':\n case 'ucs2':\n case 'ucs-2':\n case 'utf16le':\n case 'utf-16le':\n return true\n default:\n return false\n }\n}\n\nBuffer.concat = function concat (list, length) {\n if (!isArray(list)) {\n throw new TypeError('\"list\" argument must be an Array of Buffers')\n }\n\n if (list.length === 0) {\n return Buffer.alloc(0)\n }\n\n var i\n if (length === undefined) {\n length = 0\n for (i = 0; i < list.length; ++i) {\n length += list[i].length\n }\n }\n\n var buffer = Buffer.allocUnsafe(length)\n var pos = 0\n for (i = 0; i < list.length; ++i) {\n var buf = list[i]\n if (!Buffer.isBuffer(buf)) {\n throw new TypeError('\"list\" argument must be an Array of Buffers')\n }\n buf.copy(buffer, pos)\n pos += buf.length\n }\n return buffer\n}\n\nfunction byteLength (string, encoding) {\n if (Buffer.isBuffer(string)) {\n return string.length\n }\n if (typeof ArrayBuffer !== 'undefined' && typeof ArrayBuffer.isView === 'function' &&\n (ArrayBuffer.isView(string) || string instanceof ArrayBuffer)) {\n return string.byteLength\n }\n if (typeof string !== 
'string') {\n string = '' + string\n }\n\n var len = string.length\n if (len === 0) return 0\n\n // Use a for loop to avoid recursion\n var loweredCase = false\n for (;;) {\n switch (encoding) {\n case 'ascii':\n case 'latin1':\n case 'binary':\n return len\n case 'utf8':\n case 'utf-8':\n case undefined:\n return utf8ToBytes(string).length\n case 'ucs2':\n case 'ucs-2':\n case 'utf16le':\n case 'utf-16le':\n return len * 2\n case 'hex':\n return len >>> 1\n case 'base64':\n return base64ToBytes(string).length\n default:\n if (loweredCase) return utf8ToBytes(string).length // assume utf8\n encoding = ('' + encoding).toLowerCase()\n loweredCase = true\n }\n }\n}\nBuffer.byteLength = byteLength\n\nfunction slowToString (encoding, start, end) {\n var loweredCase = false\n\n // No need to verify that \"this.length <= MAX_UINT32\" since it's a read-only\n // property of a typed array.\n\n // This behaves neither like String nor Uint8Array in that we set start/end\n // to their upper/lower bounds if the value passed is out of range.\n // undefined is handled specially as per ECMA-262 6th Edition,\n // Section 13.3.3.7 Runtime Semantics: KeyedBindingInitialization.\n if (start === undefined || start < 0) {\n start = 0\n }\n // Return early if start > this.length. Done here to prevent potential uint32\n // coercion fail below.\n if (start > this.length) {\n return ''\n }\n\n if (end === undefined || end > this.length) {\n end = this.length\n }\n\n if (end <= 0) {\n return ''\n }\n\n // Force coersion to uint32. 
This will also coerce falsey/NaN values to 0.\n end >>>= 0\n start >>>= 0\n\n if (end <= start) {\n return ''\n }\n\n if (!encoding) encoding = 'utf8'\n\n while (true) {\n switch (encoding) {\n case 'hex':\n return hexSlice(this, start, end)\n\n case 'utf8':\n case 'utf-8':\n return utf8Slice(this, start, end)\n\n case 'ascii':\n return asciiSlice(this, start, end)\n\n case 'latin1':\n case 'binary':\n return latin1Slice(this, start, end)\n\n case 'base64':\n return base64Slice(this, start, end)\n\n case 'ucs2':\n case 'ucs-2':\n case 'utf16le':\n case 'utf-16le':\n return utf16leSlice(this, start, end)\n\n default:\n if (loweredCase) throw new TypeError('Unknown encoding: ' + encoding)\n encoding = (encoding + '').toLowerCase()\n loweredCase = true\n }\n }\n}\n\n// The property is used by `Buffer.isBuffer` and `is-buffer` (in Safari 5-7) to detect\n// Buffer instances.\nBuffer.prototype._isBuffer = true\n\nfunction swap (b, n, m) {\n var i = b[n]\n b[n] = b[m]\n b[m] = i\n}\n\nBuffer.prototype.swap16 = function swap16 () {\n var len = this.length\n if (len % 2 !== 0) {\n throw new RangeError('Buffer size must be a multiple of 16-bits')\n }\n for (var i = 0; i < len; i += 2) {\n swap(this, i, i + 1)\n }\n return this\n}\n\nBuffer.prototype.swap32 = function swap32 () {\n var len = this.length\n if (len % 4 !== 0) {\n throw new RangeError('Buffer size must be a multiple of 32-bits')\n }\n for (var i = 0; i < len; i += 4) {\n swap(this, i, i + 3)\n swap(this, i + 1, i + 2)\n }\n return this\n}\n\nBuffer.prototype.swap64 = function swap64 () {\n var len = this.length\n if (len % 8 !== 0) {\n throw new RangeError('Buffer size must be a multiple of 64-bits')\n }\n for (var i = 0; i < len; i += 8) {\n swap(this, i, i + 7)\n swap(this, i + 1, i + 6)\n swap(this, i + 2, i + 5)\n swap(this, i + 3, i + 4)\n }\n return this\n}\n\nBuffer.prototype.toString = function toString () {\n var length = this.length | 0\n if (length === 0) return ''\n if (arguments.length === 0) return 
utf8Slice(this, 0, length)\n return slowToString.apply(this, arguments)\n}\n\nBuffer.prototype.equals = function equals (b) {\n if (!Buffer.isBuffer(b)) throw new TypeError('Argument must be a Buffer')\n if (this === b) return true\n return Buffer.compare(this, b) === 0\n}\n\nBuffer.prototype.inspect = function inspect () {\n var str = ''\n var max = exports.INSPECT_MAX_BYTES\n if (this.length > 0) {\n str = this.toString('hex', 0, max).match(/.{2}/g).join(' ')\n if (this.length > max) str += ' ... '\n }\n return ''\n}\n\nBuffer.prototype.compare = function compare (target, start, end, thisStart, thisEnd) {\n if (!Buffer.isBuffer(target)) {\n throw new TypeError('Argument must be a Buffer')\n }\n\n if (start === undefined) {\n start = 0\n }\n if (end === undefined) {\n end = target ? target.length : 0\n }\n if (thisStart === undefined) {\n thisStart = 0\n }\n if (thisEnd === undefined) {\n thisEnd = this.length\n }\n\n if (start < 0 || end > target.length || thisStart < 0 || thisEnd > this.length) {\n throw new RangeError('out of range index')\n }\n\n if (thisStart >= thisEnd && start >= end) {\n return 0\n }\n if (thisStart >= thisEnd) {\n return -1\n }\n if (start >= end) {\n return 1\n }\n\n start >>>= 0\n end >>>= 0\n thisStart >>>= 0\n thisEnd >>>= 0\n\n if (this === target) return 0\n\n var x = thisEnd - thisStart\n var y = end - start\n var len = Math.min(x, y)\n\n var thisCopy = this.slice(thisStart, thisEnd)\n var targetCopy = target.slice(start, end)\n\n for (var i = 0; i < len; ++i) {\n if (thisCopy[i] !== targetCopy[i]) {\n x = thisCopy[i]\n y = targetCopy[i]\n break\n }\n }\n\n if (x < y) return -1\n if (y < x) return 1\n return 0\n}\n\n// Finds either the first index of `val` in `buffer` at offset >= `byteOffset`,\n// OR the last index of `val` in `buffer` at offset <= `byteOffset`.\n//\n// Arguments:\n// - buffer - a Buffer to search\n// - val - a string, Buffer, or number\n// - byteOffset - an index into `buffer`; will be clamped to an int32\n// - 
encoding - an optional encoding, relevant is val is a string\n// - dir - true for indexOf, false for lastIndexOf\nfunction bidirectionalIndexOf (buffer, val, byteOffset, encoding, dir) {\n // Empty buffer means no match\n if (buffer.length === 0) return -1\n\n // Normalize byteOffset\n if (typeof byteOffset === 'string') {\n encoding = byteOffset\n byteOffset = 0\n } else if (byteOffset > 0x7fffffff) {\n byteOffset = 0x7fffffff\n } else if (byteOffset < -0x80000000) {\n byteOffset = -0x80000000\n }\n byteOffset = +byteOffset // Coerce to Number.\n if (isNaN(byteOffset)) {\n // byteOffset: it it's undefined, null, NaN, \"foo\", etc, search whole buffer\n byteOffset = dir ? 0 : (buffer.length - 1)\n }\n\n // Normalize byteOffset: negative offsets start from the end of the buffer\n if (byteOffset < 0) byteOffset = buffer.length + byteOffset\n if (byteOffset >= buffer.length) {\n if (dir) return -1\n else byteOffset = buffer.length - 1\n } else if (byteOffset < 0) {\n if (dir) byteOffset = 0\n else return -1\n }\n\n // Normalize val\n if (typeof val === 'string') {\n val = Buffer.from(val, encoding)\n }\n\n // Finally, search either indexOf (if dir is true) or lastIndexOf\n if (Buffer.isBuffer(val)) {\n // Special case: looking for empty string/buffer always fails\n if (val.length === 0) {\n return -1\n }\n return arrayIndexOf(buffer, val, byteOffset, encoding, dir)\n } else if (typeof val === 'number') {\n val = val & 0xFF // Search for a byte value [0-255]\n if (Buffer.TYPED_ARRAY_SUPPORT &&\n typeof Uint8Array.prototype.indexOf === 'function') {\n if (dir) {\n return Uint8Array.prototype.indexOf.call(buffer, val, byteOffset)\n } else {\n return Uint8Array.prototype.lastIndexOf.call(buffer, val, byteOffset)\n }\n }\n return arrayIndexOf(buffer, [ val ], byteOffset, encoding, dir)\n }\n\n throw new TypeError('val must be string, number or Buffer')\n}\n\nfunction arrayIndexOf (arr, val, byteOffset, encoding, dir) {\n var indexSize = 1\n var arrLength = arr.length\n var 
valLength = val.length\n\n if (encoding !== undefined) {\n encoding = String(encoding).toLowerCase()\n if (encoding === 'ucs2' || encoding === 'ucs-2' ||\n encoding === 'utf16le' || encoding === 'utf-16le') {\n if (arr.length < 2 || val.length < 2) {\n return -1\n }\n indexSize = 2\n arrLength /= 2\n valLength /= 2\n byteOffset /= 2\n }\n }\n\n function read (buf, i) {\n if (indexSize === 1) {\n return buf[i]\n } else {\n return buf.readUInt16BE(i * indexSize)\n }\n }\n\n var i\n if (dir) {\n var foundIndex = -1\n for (i = byteOffset; i < arrLength; i++) {\n if (read(arr, i) === read(val, foundIndex === -1 ? 0 : i - foundIndex)) {\n if (foundIndex === -1) foundIndex = i\n if (i - foundIndex + 1 === valLength) return foundIndex * indexSize\n } else {\n if (foundIndex !== -1) i -= i - foundIndex\n foundIndex = -1\n }\n }\n } else {\n if (byteOffset + valLength > arrLength) byteOffset = arrLength - valLength\n for (i = byteOffset; i >= 0; i--) {\n var found = true\n for (var j = 0; j < valLength; j++) {\n if (read(arr, i + j) !== read(val, j)) {\n found = false\n break\n }\n }\n if (found) return i\n }\n }\n\n return -1\n}\n\nBuffer.prototype.includes = function includes (val, byteOffset, encoding) {\n return this.indexOf(val, byteOffset, encoding) !== -1\n}\n\nBuffer.prototype.indexOf = function indexOf (val, byteOffset, encoding) {\n return bidirectionalIndexOf(this, val, byteOffset, encoding, true)\n}\n\nBuffer.prototype.lastIndexOf = function lastIndexOf (val, byteOffset, encoding) {\n return bidirectionalIndexOf(this, val, byteOffset, encoding, false)\n}\n\nfunction hexWrite (buf, string, offset, length) {\n offset = Number(offset) || 0\n var remaining = buf.length - offset\n if (!length) {\n length = remaining\n } else {\n length = Number(length)\n if (length > remaining) {\n length = remaining\n }\n }\n\n // must be an even number of digits\n var strLen = string.length\n if (strLen % 2 !== 0) throw new TypeError('Invalid hex string')\n\n if (length > strLen / 
2) {\n length = strLen / 2\n }\n for (var i = 0; i < length; ++i) {\n var parsed = parseInt(string.substr(i * 2, 2), 16)\n if (isNaN(parsed)) return i\n buf[offset + i] = parsed\n }\n return i\n}\n\nfunction utf8Write (buf, string, offset, length) {\n return blitBuffer(utf8ToBytes(string, buf.length - offset), buf, offset, length)\n}\n\nfunction asciiWrite (buf, string, offset, length) {\n return blitBuffer(asciiToBytes(string), buf, offset, length)\n}\n\nfunction latin1Write (buf, string, offset, length) {\n return asciiWrite(buf, string, offset, length)\n}\n\nfunction base64Write (buf, string, offset, length) {\n return blitBuffer(base64ToBytes(string), buf, offset, length)\n}\n\nfunction ucs2Write (buf, string, offset, length) {\n return blitBuffer(utf16leToBytes(string, buf.length - offset), buf, offset, length)\n}\n\nBuffer.prototype.write = function write (string, offset, length, encoding) {\n // Buffer#write(string)\n if (offset === undefined) {\n encoding = 'utf8'\n length = this.length\n offset = 0\n // Buffer#write(string, encoding)\n } else if (length === undefined && typeof offset === 'string') {\n encoding = offset\n length = this.length\n offset = 0\n // Buffer#write(string, offset[, length][, encoding])\n } else if (isFinite(offset)) {\n offset = offset | 0\n if (isFinite(length)) {\n length = length | 0\n if (encoding === undefined) encoding = 'utf8'\n } else {\n encoding = length\n length = undefined\n }\n // legacy write(string, encoding, offset, length) - remove in v0.13\n } else {\n throw new Error(\n 'Buffer.write(string, encoding, offset[, length]) is no longer supported'\n )\n }\n\n var remaining = this.length - offset\n if (length === undefined || length > remaining) length = remaining\n\n if ((string.length > 0 && (length < 0 || offset < 0)) || offset > this.length) {\n throw new RangeError('Attempt to write outside buffer bounds')\n }\n\n if (!encoding) encoding = 'utf8'\n\n var loweredCase = false\n for (;;) {\n switch (encoding) {\n case 
'hex':\n return hexWrite(this, string, offset, length)\n\n case 'utf8':\n case 'utf-8':\n return utf8Write(this, string, offset, length)\n\n case 'ascii':\n return asciiWrite(this, string, offset, length)\n\n case 'latin1':\n case 'binary':\n return latin1Write(this, string, offset, length)\n\n case 'base64':\n // Warning: maxLength not taken into account in base64Write\n return base64Write(this, string, offset, length)\n\n case 'ucs2':\n case 'ucs-2':\n case 'utf16le':\n case 'utf-16le':\n return ucs2Write(this, string, offset, length)\n\n default:\n if (loweredCase) throw new TypeError('Unknown encoding: ' + encoding)\n encoding = ('' + encoding).toLowerCase()\n loweredCase = true\n }\n }\n}\n\nBuffer.prototype.toJSON = function toJSON () {\n return {\n type: 'Buffer',\n data: Array.prototype.slice.call(this._arr || this, 0)\n }\n}\n\nfunction base64Slice (buf, start, end) {\n if (start === 0 && end === buf.length) {\n return base64.fromByteArray(buf)\n } else {\n return base64.fromByteArray(buf.slice(start, end))\n }\n}\n\nfunction utf8Slice (buf, start, end) {\n end = Math.min(buf.length, end)\n var res = []\n\n var i = start\n while (i < end) {\n var firstByte = buf[i]\n var codePoint = null\n var bytesPerSequence = (firstByte > 0xEF) ? 4\n : (firstByte > 0xDF) ? 3\n : (firstByte > 0xBF) ? 
2\n : 1\n\n if (i + bytesPerSequence <= end) {\n var secondByte, thirdByte, fourthByte, tempCodePoint\n\n switch (bytesPerSequence) {\n case 1:\n if (firstByte < 0x80) {\n codePoint = firstByte\n }\n break\n case 2:\n secondByte = buf[i + 1]\n if ((secondByte & 0xC0) === 0x80) {\n tempCodePoint = (firstByte & 0x1F) << 0x6 | (secondByte & 0x3F)\n if (tempCodePoint > 0x7F) {\n codePoint = tempCodePoint\n }\n }\n break\n case 3:\n secondByte = buf[i + 1]\n thirdByte = buf[i + 2]\n if ((secondByte & 0xC0) === 0x80 && (thirdByte & 0xC0) === 0x80) {\n tempCodePoint = (firstByte & 0xF) << 0xC | (secondByte & 0x3F) << 0x6 | (thirdByte & 0x3F)\n if (tempCodePoint > 0x7FF && (tempCodePoint < 0xD800 || tempCodePoint > 0xDFFF)) {\n codePoint = tempCodePoint\n }\n }\n break\n case 4:\n secondByte = buf[i + 1]\n thirdByte = buf[i + 2]\n fourthByte = buf[i + 3]\n if ((secondByte & 0xC0) === 0x80 && (thirdByte & 0xC0) === 0x80 && (fourthByte & 0xC0) === 0x80) {\n tempCodePoint = (firstByte & 0xF) << 0x12 | (secondByte & 0x3F) << 0xC | (thirdByte & 0x3F) << 0x6 | (fourthByte & 0x3F)\n if (tempCodePoint > 0xFFFF && tempCodePoint < 0x110000) {\n codePoint = tempCodePoint\n }\n }\n }\n }\n\n if (codePoint === null) {\n // we did not generate a valid codePoint so insert a\n // replacement char (U+FFFD) and advance only 1 byte\n codePoint = 0xFFFD\n bytesPerSequence = 1\n } else if (codePoint > 0xFFFF) {\n // encode to utf16 (surrogate pair dance)\n codePoint -= 0x10000\n res.push(codePoint >>> 10 & 0x3FF | 0xD800)\n codePoint = 0xDC00 | codePoint & 0x3FF\n }\n\n res.push(codePoint)\n i += bytesPerSequence\n }\n\n return decodeCodePointsArray(res)\n}\n\n// Based on http://stackoverflow.com/a/22747272/680742, the browser with\n// the lowest limit is Chrome, with 0x10000 args.\n// We go 1 magnitude less, for safety\nvar MAX_ARGUMENTS_LENGTH = 0x1000\n\nfunction decodeCodePointsArray (codePoints) {\n var len = codePoints.length\n if (len <= MAX_ARGUMENTS_LENGTH) {\n return 
String.fromCharCode.apply(String, codePoints) // avoid extra slice()\n }\n\n // Decode in chunks to avoid \"call stack size exceeded\".\n var res = ''\n var i = 0\n while (i < len) {\n res += String.fromCharCode.apply(\n String,\n codePoints.slice(i, i += MAX_ARGUMENTS_LENGTH)\n )\n }\n return res\n}\n\nfunction asciiSlice (buf, start, end) {\n var ret = ''\n end = Math.min(buf.length, end)\n\n for (var i = start; i < end; ++i) {\n ret += String.fromCharCode(buf[i] & 0x7F)\n }\n return ret\n}\n\nfunction latin1Slice (buf, start, end) {\n var ret = ''\n end = Math.min(buf.length, end)\n\n for (var i = start; i < end; ++i) {\n ret += String.fromCharCode(buf[i])\n }\n return ret\n}\n\nfunction hexSlice (buf, start, end) {\n var len = buf.length\n\n if (!start || start < 0) start = 0\n if (!end || end < 0 || end > len) end = len\n\n var out = ''\n for (var i = start; i < end; ++i) {\n out += toHex(buf[i])\n }\n return out\n}\n\nfunction utf16leSlice (buf, start, end) {\n var bytes = buf.slice(start, end)\n var res = ''\n for (var i = 0; i < bytes.length; i += 2) {\n res += String.fromCharCode(bytes[i] + bytes[i + 1] * 256)\n }\n return res\n}\n\nBuffer.prototype.slice = function slice (start, end) {\n var len = this.length\n start = ~~start\n end = end === undefined ? 
len : ~~end\n\n if (start < 0) {\n start += len\n if (start < 0) start = 0\n } else if (start > len) {\n start = len\n }\n\n if (end < 0) {\n end += len\n if (end < 0) end = 0\n } else if (end > len) {\n end = len\n }\n\n if (end < start) end = start\n\n var newBuf\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n newBuf = this.subarray(start, end)\n newBuf.__proto__ = Buffer.prototype\n } else {\n var sliceLen = end - start\n newBuf = new Buffer(sliceLen, undefined)\n for (var i = 0; i < sliceLen; ++i) {\n newBuf[i] = this[i + start]\n }\n }\n\n return newBuf\n}\n\n/*\n * Need to make sure that buffer isn't trying to write out of bounds.\n */\nfunction checkOffset (offset, ext, length) {\n if ((offset % 1) !== 0 || offset < 0) throw new RangeError('offset is not uint')\n if (offset + ext > length) throw new RangeError('Trying to access beyond buffer length')\n}\n\nBuffer.prototype.readUIntLE = function readUIntLE (offset, byteLength, noAssert) {\n offset = offset | 0\n byteLength = byteLength | 0\n if (!noAssert) checkOffset(offset, byteLength, this.length)\n\n var val = this[offset]\n var mul = 1\n var i = 0\n while (++i < byteLength && (mul *= 0x100)) {\n val += this[offset + i] * mul\n }\n\n return val\n}\n\nBuffer.prototype.readUIntBE = function readUIntBE (offset, byteLength, noAssert) {\n offset = offset | 0\n byteLength = byteLength | 0\n if (!noAssert) {\n checkOffset(offset, byteLength, this.length)\n }\n\n var val = this[offset + --byteLength]\n var mul = 1\n while (byteLength > 0 && (mul *= 0x100)) {\n val += this[offset + --byteLength] * mul\n }\n\n return val\n}\n\nBuffer.prototype.readUInt8 = function readUInt8 (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 1, this.length)\n return this[offset]\n}\n\nBuffer.prototype.readUInt16LE = function readUInt16LE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 2, this.length)\n return this[offset] | (this[offset + 1] << 8)\n}\n\nBuffer.prototype.readUInt16BE = function readUInt16BE (offset, noAssert) 
{\n if (!noAssert) checkOffset(offset, 2, this.length)\n return (this[offset] << 8) | this[offset + 1]\n}\n\nBuffer.prototype.readUInt32LE = function readUInt32LE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 4, this.length)\n\n return ((this[offset]) |\n (this[offset + 1] << 8) |\n (this[offset + 2] << 16)) +\n (this[offset + 3] * 0x1000000)\n}\n\nBuffer.prototype.readUInt32BE = function readUInt32BE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 4, this.length)\n\n return (this[offset] * 0x1000000) +\n ((this[offset + 1] << 16) |\n (this[offset + 2] << 8) |\n this[offset + 3])\n}\n\nBuffer.prototype.readIntLE = function readIntLE (offset, byteLength, noAssert) {\n offset = offset | 0\n byteLength = byteLength | 0\n if (!noAssert) checkOffset(offset, byteLength, this.length)\n\n var val = this[offset]\n var mul = 1\n var i = 0\n while (++i < byteLength && (mul *= 0x100)) {\n val += this[offset + i] * mul\n }\n mul *= 0x80\n\n if (val >= mul) val -= Math.pow(2, 8 * byteLength)\n\n return val\n}\n\nBuffer.prototype.readIntBE = function readIntBE (offset, byteLength, noAssert) {\n offset = offset | 0\n byteLength = byteLength | 0\n if (!noAssert) checkOffset(offset, byteLength, this.length)\n\n var i = byteLength\n var mul = 1\n var val = this[offset + --i]\n while (i > 0 && (mul *= 0x100)) {\n val += this[offset + --i] * mul\n }\n mul *= 0x80\n\n if (val >= mul) val -= Math.pow(2, 8 * byteLength)\n\n return val\n}\n\nBuffer.prototype.readInt8 = function readInt8 (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 1, this.length)\n if (!(this[offset] & 0x80)) return (this[offset])\n return ((0xff - this[offset] + 1) * -1)\n}\n\nBuffer.prototype.readInt16LE = function readInt16LE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 2, this.length)\n var val = this[offset] | (this[offset + 1] << 8)\n return (val & 0x8000) ? 
val | 0xFFFF0000 : val\n}\n\nBuffer.prototype.readInt16BE = function readInt16BE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 2, this.length)\n var val = this[offset + 1] | (this[offset] << 8)\n return (val & 0x8000) ? val | 0xFFFF0000 : val\n}\n\nBuffer.prototype.readInt32LE = function readInt32LE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 4, this.length)\n\n return (this[offset]) |\n (this[offset + 1] << 8) |\n (this[offset + 2] << 16) |\n (this[offset + 3] << 24)\n}\n\nBuffer.prototype.readInt32BE = function readInt32BE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 4, this.length)\n\n return (this[offset] << 24) |\n (this[offset + 1] << 16) |\n (this[offset + 2] << 8) |\n (this[offset + 3])\n}\n\nBuffer.prototype.readFloatLE = function readFloatLE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 4, this.length)\n return ieee754.read(this, offset, true, 23, 4)\n}\n\nBuffer.prototype.readFloatBE = function readFloatBE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 4, this.length)\n return ieee754.read(this, offset, false, 23, 4)\n}\n\nBuffer.prototype.readDoubleLE = function readDoubleLE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 8, this.length)\n return ieee754.read(this, offset, true, 52, 8)\n}\n\nBuffer.prototype.readDoubleBE = function readDoubleBE (offset, noAssert) {\n if (!noAssert) checkOffset(offset, 8, this.length)\n return ieee754.read(this, offset, false, 52, 8)\n}\n\nfunction checkInt (buf, value, offset, ext, max, min) {\n if (!Buffer.isBuffer(buf)) throw new TypeError('\"buffer\" argument must be a Buffer instance')\n if (value > max || value < min) throw new RangeError('\"value\" argument is out of bounds')\n if (offset + ext > buf.length) throw new RangeError('Index out of range')\n}\n\nBuffer.prototype.writeUIntLE = function writeUIntLE (value, offset, byteLength, noAssert) {\n value = +value\n offset = offset | 0\n byteLength = byteLength | 0\n if (!noAssert) {\n var 
maxBytes = Math.pow(2, 8 * byteLength) - 1\n checkInt(this, value, offset, byteLength, maxBytes, 0)\n }\n\n var mul = 1\n var i = 0\n this[offset] = value & 0xFF\n while (++i < byteLength && (mul *= 0x100)) {\n this[offset + i] = (value / mul) & 0xFF\n }\n\n return offset + byteLength\n}\n\nBuffer.prototype.writeUIntBE = function writeUIntBE (value, offset, byteLength, noAssert) {\n value = +value\n offset = offset | 0\n byteLength = byteLength | 0\n if (!noAssert) {\n var maxBytes = Math.pow(2, 8 * byteLength) - 1\n checkInt(this, value, offset, byteLength, maxBytes, 0)\n }\n\n var i = byteLength - 1\n var mul = 1\n this[offset + i] = value & 0xFF\n while (--i >= 0 && (mul *= 0x100)) {\n this[offset + i] = (value / mul) & 0xFF\n }\n\n return offset + byteLength\n}\n\nBuffer.prototype.writeUInt8 = function writeUInt8 (value, offset, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) checkInt(this, value, offset, 1, 0xff, 0)\n if (!Buffer.TYPED_ARRAY_SUPPORT) value = Math.floor(value)\n this[offset] = (value & 0xff)\n return offset + 1\n}\n\nfunction objectWriteUInt16 (buf, value, offset, littleEndian) {\n if (value < 0) value = 0xffff + value + 1\n for (var i = 0, j = Math.min(buf.length - offset, 2); i < j; ++i) {\n buf[offset + i] = (value & (0xff << (8 * (littleEndian ? i : 1 - i)))) >>>\n (littleEndian ? 
i : 1 - i) * 8\n }\n}\n\nBuffer.prototype.writeUInt16LE = function writeUInt16LE (value, offset, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) checkInt(this, value, offset, 2, 0xffff, 0)\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n this[offset] = (value & 0xff)\n this[offset + 1] = (value >>> 8)\n } else {\n objectWriteUInt16(this, value, offset, true)\n }\n return offset + 2\n}\n\nBuffer.prototype.writeUInt16BE = function writeUInt16BE (value, offset, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) checkInt(this, value, offset, 2, 0xffff, 0)\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n this[offset] = (value >>> 8)\n this[offset + 1] = (value & 0xff)\n } else {\n objectWriteUInt16(this, value, offset, false)\n }\n return offset + 2\n}\n\nfunction objectWriteUInt32 (buf, value, offset, littleEndian) {\n if (value < 0) value = 0xffffffff + value + 1\n for (var i = 0, j = Math.min(buf.length - offset, 4); i < j; ++i) {\n buf[offset + i] = (value >>> (littleEndian ? 
i : 3 - i) * 8) & 0xff\n }\n}\n\nBuffer.prototype.writeUInt32LE = function writeUInt32LE (value, offset, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) checkInt(this, value, offset, 4, 0xffffffff, 0)\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n this[offset + 3] = (value >>> 24)\n this[offset + 2] = (value >>> 16)\n this[offset + 1] = (value >>> 8)\n this[offset] = (value & 0xff)\n } else {\n objectWriteUInt32(this, value, offset, true)\n }\n return offset + 4\n}\n\nBuffer.prototype.writeUInt32BE = function writeUInt32BE (value, offset, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) checkInt(this, value, offset, 4, 0xffffffff, 0)\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n this[offset] = (value >>> 24)\n this[offset + 1] = (value >>> 16)\n this[offset + 2] = (value >>> 8)\n this[offset + 3] = (value & 0xff)\n } else {\n objectWriteUInt32(this, value, offset, false)\n }\n return offset + 4\n}\n\nBuffer.prototype.writeIntLE = function writeIntLE (value, offset, byteLength, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) {\n var limit = Math.pow(2, 8 * byteLength - 1)\n\n checkInt(this, value, offset, byteLength, limit - 1, -limit)\n }\n\n var i = 0\n var mul = 1\n var sub = 0\n this[offset] = value & 0xFF\n while (++i < byteLength && (mul *= 0x100)) {\n if (value < 0 && sub === 0 && this[offset + i - 1] !== 0) {\n sub = 1\n }\n this[offset + i] = ((value / mul) >> 0) - sub & 0xFF\n }\n\n return offset + byteLength\n}\n\nBuffer.prototype.writeIntBE = function writeIntBE (value, offset, byteLength, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) {\n var limit = Math.pow(2, 8 * byteLength - 1)\n\n checkInt(this, value, offset, byteLength, limit - 1, -limit)\n }\n\n var i = byteLength - 1\n var mul = 1\n var sub = 0\n this[offset + i] = value & 0xFF\n while (--i >= 0 && (mul *= 0x100)) {\n if (value < 0 && sub === 0 && this[offset + i + 1] !== 0) {\n sub = 1\n }\n this[offset + i] = ((value / mul) >> 0) 
- sub & 0xFF\n }\n\n return offset + byteLength\n}\n\nBuffer.prototype.writeInt8 = function writeInt8 (value, offset, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) checkInt(this, value, offset, 1, 0x7f, -0x80)\n if (!Buffer.TYPED_ARRAY_SUPPORT) value = Math.floor(value)\n if (value < 0) value = 0xff + value + 1\n this[offset] = (value & 0xff)\n return offset + 1\n}\n\nBuffer.prototype.writeInt16LE = function writeInt16LE (value, offset, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) checkInt(this, value, offset, 2, 0x7fff, -0x8000)\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n this[offset] = (value & 0xff)\n this[offset + 1] = (value >>> 8)\n } else {\n objectWriteUInt16(this, value, offset, true)\n }\n return offset + 2\n}\n\nBuffer.prototype.writeInt16BE = function writeInt16BE (value, offset, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) checkInt(this, value, offset, 2, 0x7fff, -0x8000)\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n this[offset] = (value >>> 8)\n this[offset + 1] = (value & 0xff)\n } else {\n objectWriteUInt16(this, value, offset, false)\n }\n return offset + 2\n}\n\nBuffer.prototype.writeInt32LE = function writeInt32LE (value, offset, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) checkInt(this, value, offset, 4, 0x7fffffff, -0x80000000)\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n this[offset] = (value & 0xff)\n this[offset + 1] = (value >>> 8)\n this[offset + 2] = (value >>> 16)\n this[offset + 3] = (value >>> 24)\n } else {\n objectWriteUInt32(this, value, offset, true)\n }\n return offset + 4\n}\n\nBuffer.prototype.writeInt32BE = function writeInt32BE (value, offset, noAssert) {\n value = +value\n offset = offset | 0\n if (!noAssert) checkInt(this, value, offset, 4, 0x7fffffff, -0x80000000)\n if (value < 0) value = 0xffffffff + value + 1\n if (Buffer.TYPED_ARRAY_SUPPORT) {\n this[offset] = (value >>> 24)\n this[offset + 1] = (value >>> 16)\n this[offset + 2] = (value >>> 8)\n 
this[offset + 3] = (value & 0xff)\n } else {\n objectWriteUInt32(this, value, offset, false)\n }\n return offset + 4\n}\n\nfunction checkIEEE754 (buf, value, offset, ext, max, min) {\n if (offset + ext > buf.length) throw new RangeError('Index out of range')\n if (offset < 0) throw new RangeError('Index out of range')\n}\n\nfunction writeFloat (buf, value, offset, littleEndian, noAssert) {\n if (!noAssert) {\n checkIEEE754(buf, value, offset, 4, 3.4028234663852886e+38, -3.4028234663852886e+38)\n }\n ieee754.write(buf, value, offset, littleEndian, 23, 4)\n return offset + 4\n}\n\nBuffer.prototype.writeFloatLE = function writeFloatLE (value, offset, noAssert) {\n return writeFloat(this, value, offset, true, noAssert)\n}\n\nBuffer.prototype.writeFloatBE = function writeFloatBE (value, offset, noAssert) {\n return writeFloat(this, value, offset, false, noAssert)\n}\n\nfunction writeDouble (buf, value, offset, littleEndian, noAssert) {\n if (!noAssert) {\n checkIEEE754(buf, value, offset, 8, 1.7976931348623157E+308, -1.7976931348623157E+308)\n }\n ieee754.write(buf, value, offset, littleEndian, 52, 8)\n return offset + 8\n}\n\nBuffer.prototype.writeDoubleLE = function writeDoubleLE (value, offset, noAssert) {\n return writeDouble(this, value, offset, true, noAssert)\n}\n\nBuffer.prototype.writeDoubleBE = function writeDoubleBE (value, offset, noAssert) {\n return writeDouble(this, value, offset, false, noAssert)\n}\n\n// copy(targetBuffer, targetStart=0, sourceStart=0, sourceEnd=buffer.length)\nBuffer.prototype.copy = function copy (target, targetStart, start, end) {\n if (!start) start = 0\n if (!end && end !== 0) end = this.length\n if (targetStart >= target.length) targetStart = target.length\n if (!targetStart) targetStart = 0\n if (end > 0 && end < start) end = start\n\n // Copy 0 bytes; we're done\n if (end === start) return 0\n if (target.length === 0 || this.length === 0) return 0\n\n // Fatal error conditions\n if (targetStart < 0) {\n throw new 
RangeError('targetStart out of bounds')\n }\n if (start < 0 || start >= this.length) throw new RangeError('sourceStart out of bounds')\n if (end < 0) throw new RangeError('sourceEnd out of bounds')\n\n // Are we oob?\n if (end > this.length) end = this.length\n if (target.length - targetStart < end - start) {\n end = target.length - targetStart + start\n }\n\n var len = end - start\n var i\n\n if (this === target && start < targetStart && targetStart < end) {\n // descending copy from end\n for (i = len - 1; i >= 0; --i) {\n target[i + targetStart] = this[i + start]\n }\n } else if (len < 1000 || !Buffer.TYPED_ARRAY_SUPPORT) {\n // ascending copy from start\n for (i = 0; i < len; ++i) {\n target[i + targetStart] = this[i + start]\n }\n } else {\n Uint8Array.prototype.set.call(\n target,\n this.subarray(start, start + len),\n targetStart\n )\n }\n\n return len\n}\n\n// Usage:\n// buffer.fill(number[, offset[, end]])\n// buffer.fill(buffer[, offset[, end]])\n// buffer.fill(string[, offset[, end]][, encoding])\nBuffer.prototype.fill = function fill (val, start, end, encoding) {\n // Handle string cases:\n if (typeof val === 'string') {\n if (typeof start === 'string') {\n encoding = start\n start = 0\n end = this.length\n } else if (typeof end === 'string') {\n encoding = end\n end = this.length\n }\n if (val.length === 1) {\n var code = val.charCodeAt(0)\n if (code < 256) {\n val = code\n }\n }\n if (encoding !== undefined && typeof encoding !== 'string') {\n throw new TypeError('encoding must be a string')\n }\n if (typeof encoding === 'string' && !Buffer.isEncoding(encoding)) {\n throw new TypeError('Unknown encoding: ' + encoding)\n }\n } else if (typeof val === 'number') {\n val = val & 255\n }\n\n // Invalid ranges are not set to a default, so can range check early.\n if (start < 0 || this.length < start || this.length < end) {\n throw new RangeError('Out of range index')\n }\n\n if (end <= start) {\n return this\n }\n\n start = start >>> 0\n end = end === 
undefined ? this.length : end >>> 0\n\n if (!val) val = 0\n\n var i\n if (typeof val === 'number') {\n for (i = start; i < end; ++i) {\n this[i] = val\n }\n } else {\n var bytes = Buffer.isBuffer(val)\n ? val\n : utf8ToBytes(new Buffer(val, encoding).toString())\n var len = bytes.length\n for (i = 0; i < end - start; ++i) {\n this[i + start] = bytes[i % len]\n }\n }\n\n return this\n}\n\n// HELPER FUNCTIONS\n// ================\n\nvar INVALID_BASE64_RE = /[^+\\/0-9A-Za-z-_]/g\n\nfunction base64clean (str) {\n // Node strips out invalid characters like \\n and \\t from the string, base64-js does not\n str = stringtrim(str).replace(INVALID_BASE64_RE, '')\n // Node converts strings with length < 2 to ''\n if (str.length < 2) return ''\n // Node allows for non-padded base64 strings (missing trailing ===), base64-js does not\n while (str.length % 4 !== 0) {\n str = str + '='\n }\n return str\n}\n\nfunction stringtrim (str) {\n if (str.trim) return str.trim()\n return str.replace(/^\\s+|\\s+$/g, '')\n}\n\nfunction toHex (n) {\n if (n < 16) return '0' + n.toString(16)\n return n.toString(16)\n}\n\nfunction utf8ToBytes (string, units) {\n units = units || Infinity\n var codePoint\n var length = string.length\n var leadSurrogate = null\n var bytes = []\n\n for (var i = 0; i < length; ++i) {\n codePoint = string.charCodeAt(i)\n\n // is surrogate component\n if (codePoint > 0xD7FF && codePoint < 0xE000) {\n // last char was a lead\n if (!leadSurrogate) {\n // no lead yet\n if (codePoint > 0xDBFF) {\n // unexpected trail\n if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD)\n continue\n } else if (i + 1 === length) {\n // unpaired lead\n if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD)\n continue\n }\n\n // valid lead\n leadSurrogate = codePoint\n\n continue\n }\n\n // 2 leads in a row\n if (codePoint < 0xDC00) {\n if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD)\n leadSurrogate = codePoint\n continue\n }\n\n // valid surrogate pair\n codePoint = (leadSurrogate - 0xD800 
<< 10 | codePoint - 0xDC00) + 0x10000\n } else if (leadSurrogate) {\n // valid bmp char, but last char was a lead\n if ((units -= 3) > -1) bytes.push(0xEF, 0xBF, 0xBD)\n }\n\n leadSurrogate = null\n\n // encode utf8\n if (codePoint < 0x80) {\n if ((units -= 1) < 0) break\n bytes.push(codePoint)\n } else if (codePoint < 0x800) {\n if ((units -= 2) < 0) break\n bytes.push(\n codePoint >> 0x6 | 0xC0,\n codePoint & 0x3F | 0x80\n )\n } else if (codePoint < 0x10000) {\n if ((units -= 3) < 0) break\n bytes.push(\n codePoint >> 0xC | 0xE0,\n codePoint >> 0x6 & 0x3F | 0x80,\n codePoint & 0x3F | 0x80\n )\n } else if (codePoint < 0x110000) {\n if ((units -= 4) < 0) break\n bytes.push(\n codePoint >> 0x12 | 0xF0,\n codePoint >> 0xC & 0x3F | 0x80,\n codePoint >> 0x6 & 0x3F | 0x80,\n codePoint & 0x3F | 0x80\n )\n } else {\n throw new Error('Invalid code point')\n }\n }\n\n return bytes\n}\n\nfunction asciiToBytes (str) {\n var byteArray = []\n for (var i = 0; i < str.length; ++i) {\n // Node's code seems to be doing this and not & 0x7F..\n byteArray.push(str.charCodeAt(i) & 0xFF)\n }\n return byteArray\n}\n\nfunction utf16leToBytes (str, units) {\n var c, hi, lo\n var byteArray = []\n for (var i = 0; i < str.length; ++i) {\n if ((units -= 2) < 0) break\n\n c = str.charCodeAt(i)\n hi = c >> 8\n lo = c % 256\n byteArray.push(lo)\n byteArray.push(hi)\n }\n\n return byteArray\n}\n\nfunction base64ToBytes (str) {\n return base64.toByteArray(base64clean(str))\n}\n\nfunction blitBuffer (src, dst, offset, length) {\n for (var i = 0; i < length; ++i) {\n if ((i + offset >= dst.length) || (i >= src.length)) break\n dst[i + offset] = src[i]\n }\n return i\n}\n\nfunction isnan (val) {\n return val !== val // eslint-disable-line no-self-compare\n}\n\n/* WEBPACK VAR INJECTION */}.call(this, __webpack_require__(/*! ./../webpack/buildin/global.js */ \"./node_modules/webpack/buildin/global.js\")))\n\n//# sourceURL=webpack:///./node_modules/buffer/index.js?");
-
-/***/ }),
-
-/***/ "./node_modules/google-protobuf/google-protobuf.js":
-/*!*********************************************************!*\
- !*** ./node_modules/google-protobuf/google-protobuf.js ***!
- \*********************************************************/
-/*! no static exports found */
-/***/ (function(module, exports, __webpack_require__) {
-
-eval("/* WEBPACK VAR INJECTION */(function(global, Buffer) {var $jscomp={scope:{},getGlobal:function(a){return\"undefined\"!=typeof window&&window===a?a:\"undefined\"!=typeof global?global:a}};$jscomp.global=$jscomp.getGlobal(this);$jscomp.initSymbol=function(){$jscomp.global.Symbol||($jscomp.global.Symbol=$jscomp.Symbol);$jscomp.initSymbol=function(){}};$jscomp.symbolCounter_=0;$jscomp.Symbol=function(a){return\"jscomp_symbol_\"+a+$jscomp.symbolCounter_++};\n$jscomp.initSymbolIterator=function(){$jscomp.initSymbol();$jscomp.global.Symbol.iterator||($jscomp.global.Symbol.iterator=$jscomp.global.Symbol(\"iterator\"));$jscomp.initSymbolIterator=function(){}};$jscomp.makeIterator=function(a){$jscomp.initSymbolIterator();$jscomp.initSymbol();$jscomp.initSymbolIterator();var b=a[Symbol.iterator];if(b)return b.call(a);var c=0;return{next:function(){return cb;)--c in this?this[--a]=this[c]:delete this[a];return this};$jscomp.array.copyWithin$install=function(){$jscomp.array.installHelper_(\"copyWithin\",$jscomp.array.copyWithin)};\n$jscomp.array.fill=function(a,b,c){var d=this.length||0;0>b&&(b=Math.max(0,d+b));if(null==c||c>d)c=d;c=Number(c);0>c&&(c=Math.max(0,d+c));for(b=Number(b||0);b>>0;if(0===a)return 32;var b=0;0===(a&4294901760)&&(a<<=16,b+=16);0===(a&4278190080)&&(a<<=8,b+=8);0===(a&4026531840)&&(a<<=4,b+=4);0===(a&3221225472)&&(a<<=2,b+=2);0===(a&2147483648)&&b++;return b};$jscomp.math.imul=function(a,b){a=Number(a);b=Number(b);var c=a&65535,d=b&65535;return c*d+((a>>>16&65535)*d+c*(b>>>16&65535)<<16>>>0)|0};$jscomp.math.sign=function(a){a=Number(a);return 0===a||isNaN(a)?a:0a&&-.25a&&-.25a?-b:b};$jscomp.math.acosh=function(a){a=Number(a);return Math.log(a+Math.sqrt(a*a-1))};$jscomp.math.asinh=function(a){a=Number(a);if(0===a)return a;var b=Math.log(Math.abs(a)+Math.sqrt(a*a+1));return 0>a?-b:b};\n$jscomp.math.atanh=function(a){a=Number(a);return($jscomp.math.log1p(a)-$jscomp.math.log1p(-a))/2};$jscomp.math.hypot=function(a,b,c){a=Number(a);b=Number(b);var 
d,e,f,g=Math.max(Math.abs(a),Math.abs(b));for(d=2;dg){a/=g;b/=g;f=a*a+b*b;for(d=2;da?-b:b};$jscomp.math.cbrt=function(a){if(0===a)return a;a=Number(a);var b=Math.pow(Math.abs(a),1/3);return 0>a?-b:b};$jscomp.number=$jscomp.number||{};$jscomp.number.isFinite=function(a){return\"number\"!==typeof a?!1:!isNaN(a)&&Infinity!==a&&-Infinity!==a};$jscomp.number.isInteger=function(a){return $jscomp.number.isFinite(a)?a===Math.floor(a):!1};\n$jscomp.number.isNaN=function(a){return\"number\"===typeof a&&isNaN(a)};$jscomp.number.isSafeInteger=function(a){return $jscomp.number.isInteger(a)&&Math.abs(a)<=$jscomp.number.MAX_SAFE_INTEGER};$jscomp.number.EPSILON=function(){return Math.pow(2,-52)}();$jscomp.number.MAX_SAFE_INTEGER=function(){return 9007199254740991}();$jscomp.number.MIN_SAFE_INTEGER=function(){return-9007199254740991}();$jscomp.object=$jscomp.object||{};\n$jscomp.object.assign=function(a,b){for(var c=1;cd||1114111=d?b+=String.fromCharCode(d):(d-=65536,b+=String.fromCharCode(d>>>10&1023|55296),b+=String.fromCharCode(d&1023|56320))}return b};\n$jscomp.string.repeat=function(a){var b=$jscomp.checkStringArgs(this,null,\"repeat\");if(0>a||1342177279>>=1)b+=b;return c};$jscomp.string.repeat$install=function(){String.prototype.repeat||(String.prototype.repeat=$jscomp.string.repeat)};\n$jscomp.string.codePointAt=function(a){var b=$jscomp.checkStringArgs(this,null,\"codePointAt\"),c=b.length;a=Number(a)||0;if(0<=a&&ad||56319a||57343=e};\n$jscomp.string.startsWith$install=function(){String.prototype.startsWith||(String.prototype.startsWith=$jscomp.string.startsWith)};$jscomp.string.endsWith=function(a,b){var c=$jscomp.checkStringArgs(this,a,\"endsWith\");a+=\"\";void 0===b&&(b=c.length);for(var d=Math.max(0,Math.min(b|0,c.length)),e=a.length;0=e};$jscomp.string.endsWith$install=function(){String.prototype.endsWith||(String.prototype.endsWith=$jscomp.string.endsWith)};\nvar COMPILED=!0,goog=goog||{};goog.global=this;goog.isDef=function(a){return void 
0!==a};goog.exportPath_=function(a,b,c){a=a.split(\".\");c=c||goog.global;a[0]in c||!c.execScript||c.execScript(\"var \"+a[0]);for(var d;a.length&&(d=a.shift());)!a.length&&goog.isDef(b)?c[d]=b:c=c[d]?c[d]:c[d]={}};\ngoog.define=function(a,b){var c=b;COMPILED||(goog.global.CLOSURE_UNCOMPILED_DEFINES&&Object.prototype.hasOwnProperty.call(goog.global.CLOSURE_UNCOMPILED_DEFINES,a)?c=goog.global.CLOSURE_UNCOMPILED_DEFINES[a]:goog.global.CLOSURE_DEFINES&&Object.prototype.hasOwnProperty.call(goog.global.CLOSURE_DEFINES,a)&&(c=goog.global.CLOSURE_DEFINES[a]));goog.exportPath_(a,c)};goog.DEBUG=!0;goog.LOCALE=\"en\";goog.TRUSTED_SITE=!0;goog.STRICT_MODE_COMPATIBLE=!1;goog.DISALLOW_TEST_ONLY_CODE=COMPILED&&!goog.DEBUG;\ngoog.ENABLE_CHROME_APP_SAFE_SCRIPT_LOADING=!1;goog.provide=function(a){if(!COMPILED&&goog.isProvided_(a))throw Error('Namespace \"'+a+'\" already declared.');goog.constructNamespace_(a)};goog.constructNamespace_=function(a,b){if(!COMPILED){delete goog.implicitNamespaces_[a];for(var c=a;(c=c.substring(0,c.lastIndexOf(\".\")))&&!goog.getObjectByName(c);)goog.implicitNamespaces_[c]=!0}goog.exportPath_(a,b)};goog.VALID_MODULE_RE_=/^[a-zA-Z_$][a-zA-Z0-9._$]*$/;\ngoog.module=function(a){if(!goog.isString(a)||!a||-1==a.search(goog.VALID_MODULE_RE_))throw Error(\"Invalid module identifier\");if(!goog.isInModuleLoader_())throw Error(\"Module \"+a+\" has been loaded incorrectly.\");if(goog.moduleLoaderState_.moduleName)throw Error(\"goog.module may only be called once per module.\");goog.moduleLoaderState_.moduleName=a;if(!COMPILED){if(goog.isProvided_(a))throw Error('Namespace \"'+a+'\" already declared.');delete goog.implicitNamespaces_[a]}};goog.module.get=function(a){return goog.module.getInternal_(a)};\ngoog.module.getInternal_=function(a){if(!COMPILED)return goog.isProvided_(a)?a in goog.loadedModules_?goog.loadedModules_[a]:goog.getObjectByName(a):null};goog.moduleLoaderState_=null;goog.isInModuleLoader_=function(){return 
null!=goog.moduleLoaderState_};\ngoog.module.declareLegacyNamespace=function(){if(!COMPILED&&!goog.isInModuleLoader_())throw Error(\"goog.module.declareLegacyNamespace must be called from within a goog.module\");if(!COMPILED&&!goog.moduleLoaderState_.moduleName)throw Error(\"goog.module must be called prior to goog.module.declareLegacyNamespace.\");goog.moduleLoaderState_.declareLegacyNamespace=!0};\ngoog.setTestOnly=function(a){if(goog.DISALLOW_TEST_ONLY_CODE)throw a=a||\"\",Error(\"Importing test-only code into non-debug environment\"+(a?\": \"+a:\".\"));};goog.forwardDeclare=function(a){};COMPILED||(goog.isProvided_=function(a){return a in goog.loadedModules_||!goog.implicitNamespaces_[a]&&goog.isDefAndNotNull(goog.getObjectByName(a))},goog.implicitNamespaces_={\"goog.module\":!0});\ngoog.getObjectByName=function(a,b){for(var c=a.split(\".\"),d=b||goog.global,e;e=c.shift();)if(goog.isDefAndNotNull(d[e]))d=d[e];else return null;return d};goog.globalize=function(a,b){var c=b||goog.global,d;for(d in a)c[d]=a[d]};goog.addDependency=function(a,b,c,d){if(goog.DEPENDENCIES_ENABLED){var e;a=a.replace(/\\\\/g,\"/\");for(var f=goog.dependencies_,g=0;e=b[g];g++)f.nameToPath[e]=a,f.pathIsModule[a]=!!d;for(d=0;b=c[d];d++)a in f.requires||(f.requires[a]={}),f.requires[a][b]=!0}};\ngoog.ENABLE_DEBUG_LOADER=!0;goog.logToConsole_=function(a){goog.global.console&&goog.global.console.error(a)};goog.require=function(a){if(!COMPILED){goog.ENABLE_DEBUG_LOADER&&goog.IS_OLD_IE_&&goog.maybeProcessDeferredDep_(a);if(goog.isProvided_(a))return goog.isInModuleLoader_()?goog.module.getInternal_(a):null;if(goog.ENABLE_DEBUG_LOADER){var b=goog.getPathFromDeps_(a);if(b)return goog.writeScripts_(b),null}a=\"goog.require could not find: \"+a;goog.logToConsole_(a);throw Error(a);}};\ngoog.basePath=\"\";goog.nullFunction=function(){};goog.abstractMethod=function(){throw Error(\"unimplemented abstract method\");};goog.addSingletonGetter=function(a){a.getInstance=function(){if(a.instance_)return 
a.instance_;goog.DEBUG&&(goog.instantiatedSingletons_[goog.instantiatedSingletons_.length]=a);return a.instance_=new a}};goog.instantiatedSingletons_=[];goog.LOAD_MODULE_USING_EVAL=!0;goog.SEAL_MODULE_EXPORTS=goog.DEBUG;goog.loadedModules_={};goog.DEPENDENCIES_ENABLED=!COMPILED&&goog.ENABLE_DEBUG_LOADER;\ngoog.DEPENDENCIES_ENABLED&&(goog.dependencies_={pathIsModule:{},nameToPath:{},requires:{},visited:{},written:{},deferred:{}},goog.inHtmlDocument_=function(){var a=goog.global.document;return null!=a&&\"write\"in a},goog.findBasePath_=function(){if(goog.isDef(goog.global.CLOSURE_BASE_PATH))goog.basePath=goog.global.CLOSURE_BASE_PATH;else if(goog.inHtmlDocument_())for(var a=goog.global.document.getElementsByTagName(\"SCRIPT\"),b=a.length-1;0<=b;--b){var c=a[b].src,d=c.lastIndexOf(\"?\"),d=-1==d?c.length:\nd;if(\"base.js\"==c.substr(d-7,7)){goog.basePath=c.substr(0,d-7);break}}},goog.importScript_=function(a,b){(goog.global.CLOSURE_IMPORT_SCRIPT||goog.writeScriptTag_)(a,b)&&(goog.dependencies_.written[a]=!0)},goog.IS_OLD_IE_=!(goog.global.atob||!goog.global.document||!goog.global.document.all),goog.importModule_=function(a){goog.importScript_(\"\",'goog.retrieveAndExecModule_(\"'+a+'\");')&&(goog.dependencies_.written[a]=!0)},goog.queuedModules_=[],goog.wrapModule_=function(a,b){return goog.LOAD_MODULE_USING_EVAL&&\ngoog.isDef(goog.global.JSON)?\"goog.loadModule(\"+goog.global.JSON.stringify(b+\"\\n//# sourceURL=\"+a+\"\\n\")+\");\":'goog.loadModule(function(exports) {\"use strict\";'+b+\"\\n;return exports});\\n//# sourceURL=\"+a+\"\\n\"},goog.loadQueuedModules_=function(){var a=goog.queuedModules_.length;if(0\\x3c/script>')},goog.appendScriptSrcNode_=function(a){var b=goog.global.document,\nc=b.createElement(\"script\");c.type=\"text/javascript\";c.src=a;c.defer=!1;c.async=!1;b.head.appendChild(c)},goog.writeScriptTag_=function(a,b){if(goog.inHtmlDocument_()){var 
c=goog.global.document;if(!goog.ENABLE_CHROME_APP_SAFE_SCRIPT_LOADING&&\"complete\"==c.readyState){if(/\\bdeps.js$/.test(a))return!1;throw Error('Cannot write \"'+a+'\" after document load');}var d=goog.IS_OLD_IE_;void 0===b?d?(d=\" onreadystatechange='goog.onScriptLoad_(this, \"+ ++goog.lastNonModuleScriptIndex_+\")' \",c.write('