diff --git a/.github/workflows/execute-chart-test.yml b/.github/workflows/execute-chart-test.yml deleted file mode 100644 index a50b26798c..0000000000 --- a/.github/workflows/execute-chart-test.yml +++ /dev/null @@ -1,35 +0,0 @@ -name: chart-test -on: - push: - branches: - - master - - release/v* - pull_request: {} - -jobs: - execute-chart-test: - runs-on: ubuntu-latest - name: chart-test - env: - AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }} - AWS_ACCESS_KEY_ID: ${{ secrets.GH_AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.GH_AWS_SECRET_ACCESS_KEY }} - AWS_EC2_METADATA_DISABLED: true - DEV_REGISTRY: ${{ secrets.DEV_REGISTRY }} - steps: - - uses: docker/login-action@v1 - with: - username: ${{ secrets.GH_DOCKER_BUILD_USERNAME }} - password: ${{ secrets.GH_DOCKER_BUILD_TOKEN }} - - uses: actions/checkout@v2 - with: - fetch-depth: 0 - ref: ${{ github.event.pull_request.head.sha }} - - name: Install Deps - uses: ./.github/actions/setup-deps - - name: make test-chart - run: | - make test-chart - - name: make release/promote-chart-passed - run: | - make release/promote-chart-passed diff --git a/.github/workflows/execute-tests-and-promote.yml b/.github/workflows/execute-tests-and-promote.yml index 8d68fd365c..1d718d4ec7 100644 --- a/.github/workflows/execute-tests-and-promote.yml +++ b/.github/workflows/execute-tests-and-promote.yml @@ -1,6 +1,6 @@ name: job-promote-to-passed -on: +"on": push: branches: - master @@ -11,6 +11,9 @@ jobs: lint: ######################################################################## runs-on: ubuntu-latest + env: + # See docker/base-python.docker.gen + BASE_PYTHON_REPO: ${{ secrets.BASE_PYTHON_REPO }} steps: - uses: actions/checkout@v2 with: @@ -24,8 +27,8 @@ jobs: generate: #################################################################### runs-on: ubuntu-latest env: - # Set DEV_REGISTRY to match BASE_REGISTRY. - DEV_REGISTRY: "docker.io/emissaryingress" + # See docker/base-python.docker.gen + BASE_PYTHON_REPO: ${{ secrets.BASE_PYTHON_REPO }} steps: - uses: actions/checkout@v2 with: @@ -52,6 +55,9 @@ jobs: check-envoy-version: ######################################################### runs-on: ubuntu-latest + env: + # See docker/base-python.docker.gen + BASE_PYTHON_REPO: ${{ secrets.BASE_PYTHON_REPO }} steps: - uses: actions/checkout@v2 with: @@ -63,6 +69,9 @@ jobs: # Tests ###################################################################### check-gotest: runs-on: ubuntu-latest + env: + # See docker/base-python.docker.gen + BASE_PYTHON_REPO: ${{ secrets.BASE_PYTHON_REPO }} steps: - uses: actions/checkout@v2 with: @@ -90,6 +99,9 @@ jobs: name: "Collect testing logs" check-pytest: runs-on: ubuntu-latest + env: + # See docker/base-python.docker.gen + BASE_PYTHON_REPO: ${{ secrets.BASE_PYTHON_REPO }} strategy: fail-fast: false matrix: @@ -147,6 +159,9 @@ jobs: # pytest-unit is separate from pytests (above) because we know for certain that no cluster is needed. # XXX This is pretty much a crock. 
runs-on: ubuntu-latest + env: + # See docker/base-python.docker.gen + BASE_PYTHON_REPO: ${{ secrets.BASE_PYTHON_REPO }} strategy: matrix: test: @@ -185,6 +200,34 @@ jobs: - uses: ./.github/actions/collect-testing-logs if: always() name: "Collect testing logs" + check-chart: + runs-on: ubuntu-latest + name: chart-test + env: + AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }} + AWS_ACCESS_KEY_ID: ${{ secrets.GH_AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.GH_AWS_SECRET_ACCESS_KEY }} + AWS_EC2_METADATA_DISABLED: true + DEV_REGISTRY: ${{ secrets.DEV_REGISTRY }} + # See docker/base-python.docker.gen + BASE_PYTHON_REPO: ${{ secrets.BASE_PYTHON_REPO }} + steps: + - uses: docker/login-action@v1 + with: + username: ${{ secrets.GH_DOCKER_BUILD_USERNAME }} + password: ${{ secrets.GH_DOCKER_BUILD_TOKEN }} + - uses: actions/checkout@v2 + with: + fetch-depth: 0 + ref: ${{ github.event.pull_request.head.sha }} + - name: Install Deps + uses: ./.github/actions/setup-deps + - name: make test-chart + run: | + make test-chart + - name: make release/promote-chart-passed + run: | + make release/promote-chart-passed build: ####################################################################### runs-on: ubuntu-latest @@ -194,6 +237,8 @@ jobs: AWS_EC2_METADATA_DISABLED: true AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }} DEV_REGISTRY: ${{ secrets.DEV_REGISTRY }} + # See docker/base-python.docker.gen + BASE_PYTHON_REPO: ${{ secrets.BASE_PYTHON_REPO }} steps: - uses: actions/checkout@v2 with: @@ -228,6 +273,7 @@ jobs: - check-gotest - check-pytest - check-pytest-unit + - check-chart runs-on: ubuntu-latest env: AWS_ACCESS_KEY_ID: ${{ secrets.GH_AWS_ACCESS_KEY_ID }} diff --git a/.github/workflows/promote-ga.yml b/.github/workflows/promote-ga.yml index 8a34ec4692..0e99b6ba39 100644 --- a/.github/workflows/promote-ga.yml +++ b/.github/workflows/promote-ga.yml @@ -1,5 +1,5 @@ name: promote-to-ga -on: +"on": push: tags: - 'v[0-9]+.[0-9]+.[0-9]+' diff --git a/.github/workflows/promote-rc.yml b/.github/workflows/promote-rc.yml index 9a061d3601..b140e3b026 100644 --- a/.github/workflows/promote-rc.yml +++ b/.github/workflows/promote-rc.yml @@ -1,5 +1,5 @@ name: promote-to-rc -on: +"on": push: tags: - 'v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+' diff --git a/.github/workflows/publish-chart.yml b/.github/workflows/publish-chart.yml index 788fcfdd3c..20b709d510 100644 --- a/.github/workflows/publish-chart.yml +++ b/.github/workflows/publish-chart.yml @@ -1,5 +1,5 @@ name: chart-publish -on: +"on": push: tags: - 'chart/v*' diff --git a/.github/workflows/publish-manifests.yml b/.github/workflows/publish-manifests.yml index 29b0f8e26a..12eb2d9e64 100644 --- a/.github/workflows/publish-manifests.yml +++ b/.github/workflows/publish-manifests.yml @@ -1,5 +1,5 @@ name: manifest-publish -on: +"on": push: tags: - 'chart/v*' diff --git a/.github/workflows/repatriate.yml b/.github/workflows/repatriate.yml index b9ecf1edd5..d27b7f70c2 100644 --- a/.github/workflows/repatriate.yml +++ b/.github/workflows/repatriate.yml @@ -1,5 +1,5 @@ name: repatriate -on: +"on": push: branches: - release/v* diff --git a/CHANGELOG.md b/CHANGELOG.md index 431ac6f2e2..2c8f1dd715 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -114,6 +114,9 @@ Please see the [Envoy documentation](https://www.envoyproxy.io/docs/envoy/latest to Ambassador's cloud including the command ID, whether it ran successfully, and an error message in case there was any. 
([4040]) +- Security: Emissary has been upgraded from Alpine 3.12 to Alpine 3.15, which incorporates numerous + security patches. + [3906]: https://github.com/emissary-ingress/emissary/issues/3906 [3821]: https://github.com/emissary-ingress/emissary/issues/3821 [4040]: https://github.com/emissary-ingress/emissary/pull/4040 diff --git a/Makefile b/Makefile index 180088df91..d0fcfc1594 100644 --- a/Makefile +++ b/Makefile @@ -33,6 +33,10 @@ ifneq ($(MAKECMDGOALS),$(OSS_HOME)/build-aux/go-version.txt) $(info [make] CHART_VERSION=$(CHART_VERSION)) endif +# If SOURCE_DATE_EPOCH isn't set, AND the tree isn't dirty, then set +# SOURCE_DATE_EPOCH to the commit timestamp. +# +# if [[ -z "$SOURCE_DATE_EPOCH" ]] && [[ -z "$(git status --porcelain)" ]]; then ifeq ($(SOURCE_DATE_EPOCH)$(shell git status --porcelain),) SOURCE_DATE_EPOCH := $(shell git log -1 --pretty=%ct) endif @@ -43,15 +47,6 @@ endif # Everything else... -# BASE_REGISTRY is where the base images (as in -# `builder/Dockerfile.base`) get pulled-from/pushed-to. We default -# this to docker.io/emissaryingress rather than to $(DEV_REGISTRY) or -# to a .local registry because rebuilding orjson takes so long, we -# really want to cache it unless the dev really wants to force doing -# everything locally. -BASE_REGISTRY ?= docker.io/emissaryingress -export BASE_REGISTRY - NAME ?= emissary _git_remote_urls := $(shell git remote | xargs -n1 git remote get-url --all) IS_PRIVATE ?= $(findstring private,$(_git_remote_urls)) diff --git a/OPENSOURCE.md b/OPENSOURCE.md index df6faf238f..5b435e056c 100644 --- a/OPENSOURCE.md +++ b/OPENSOURCE.md @@ -129,68 +129,66 @@ following Free and Open Source software: The Emissary-ingress Python code makes use of the following Free and Open Source libraries: - Name Version License(s) - ---- ------- ---------- - CacheControl 0.12.6 Apache License 2.0 - Cython 0.29.19 Apache License 2.0 - Flask 2.0.2 3-clause BSD license - Jinja2 3.0.3 3-clause BSD license - MarkupSafe 2.0.1 3-clause BSD license - PyYAML 5.4.1 MIT license - Werkzeug 2.0.2 3-clause BSD license - appdirs 1.4.4 MIT license - attrs 19.3.0 MIT license - cachetools 4.1.1 MIT license - certifi 2020.6.20 Mozilla Public License 2.0 - charset-normalizer 2.0.8 MIT license - click 8.0.3 3-clause BSD license - clize 4.2.1 MIT license - colorama 0.4.3 3-clause BSD license - contextlib2 0.6.0 Python Software Foundation license - distlib 0.3.0 Python Software Foundation license - distro 1.5.0 Apache License 2.0 - docutils 0.17.1 2-clause BSD license, GNU General Public License Version 3, Public domain, Python Software Foundation license - durationpy 0.5 MIT license - expiringdict 1.2.1 Apache License 2.0 - google-auth 1.23.0 Apache License 2.0 - gunicorn 20.1.0 MIT license - html5lib 1.0.1 MIT license - idna 2.7 3-clause BSD license, Python Software Foundation license, Unicode License Agreement for Data Files and Software (2015) - importlib-resources 5.4.0 Apache License 2.0 - itsdangerous 2.0.1 3-clause BSD license - jsonpatch 1.32 3-clause BSD license - jsonpointer 2.0 3-clause BSD license - jsonschema 4.2.1 MIT license - k8s-proto 0.0.3 Apache License 2.0 - kubernetes 20.13.0 Apache License 2.0 - lockfile 0.12.2 MIT license - msgpack 1.0.0 Apache License 2.0 - oauthlib 3.1.0 3-clause BSD license - od 1.0 MIT license - ordered-set 4.0.1 MIT license - orjson 3.3.1 Apache License 2.0, MIT license - packaging 20.4 2-clause BSD license, Apache License 2.0 - pep517 0.8.2 MIT license - pip-tools 5.3.1 3-clause BSD license - progress 1.5 ISC 
license - prometheus-client 0.12.0 Apache License 2.0 - protobuf 3.13.0 3-clause BSD license - pyasn1 0.4.8 2-clause BSD license - pyasn1-modules 0.2.8 2-clause BSD license - pyparsing 2.4.7 MIT license - pyrsistent 0.17.3 MIT license - python-dateutil 2.8.1 3-clause BSD license, Apache License 2.0 - python-json-logger 2.0.2 2-clause BSD license - pytoml 0.1.21 MIT license - requests 2.26.0 Apache License 2.0 - requests-oauthlib 1.3.0 ISC license - retrying 1.3.3 Apache License 2.0 - rsa 4.6 Apache License 2.0 - semantic-version 2.8.5 2-clause BSD license - sigtools 2.0.2 MIT license - six 1.15.0 MIT license - toml 0.10.1 MIT license - urllib3 1.26.5 MIT license - webencodings 0.5.1 3-clause BSD license - websocket-client 0.57.0 3-clause BSD license - zipp 3.6.0 MIT license + Name Version License(s) + ---- ------- ---------- + CacheControl 0.12.10 Apache License 2.0 + Cython 0.29.24 Apache License 2.0 + Flask 2.0.2 3-clause BSD license + Jinja2 3.0.3 3-clause BSD license + MarkupSafe 2.0.1 3-clause BSD license + PyYAML 5.4.1 MIT license + Werkzeug 2.0.2 3-clause BSD license + appdirs 1.4.4 MIT license + attrs 19.3.0 MIT license + cachetools 4.1.1 MIT license + certifi 2020.6.20 Mozilla Public License 2.0 + charset-normalizer 2.0.8 MIT license + click 8.0.3 3-clause BSD license + clize 4.2.1 MIT license + colorama 0.4.4 3-clause BSD license + contextlib2 21.6.0 Python Software Foundation license + distlib 0.3.3 Python Software Foundation license + distro 1.6.0 Apache License 2.0 + docutils 0.17.1 2-clause BSD license, GNU General Public License Version 3, Public domain, Python Software Foundation license + durationpy 0.5 MIT license + expiringdict 1.2.1 Apache License 2.0 + google-auth 1.23.0 Apache License 2.0 + gunicorn 20.1.0 MIT license + html5lib 1.1 MIT license + idna 2.7 3-clause BSD license, Python Software Foundation license, Unicode License Agreement for Data Files and Software (2015) + itsdangerous 2.0.1 3-clause BSD license + jsonpatch 1.32 3-clause BSD license + jsonpointer 2.0 3-clause BSD license + jsonschema 4.2.1 MIT license + k8s-proto 0.0.3 Apache License 2.0 + kubernetes 20.13.0 Apache License 2.0 + lockfile 0.12.2 MIT license + msgpack 1.0.2 Apache License 2.0 + oauthlib 3.1.0 3-clause BSD license + od 1.0 MIT license + ordered-set 4.0.2 MIT license + orjson 3.6.6 Apache License 2.0, MIT license + packaging 20.9 2-clause BSD license, Apache License 2.0 + pep517 0.12.0 MIT license + pip-tools 6.3.1 3-clause BSD license + progress 1.6 ISC license + prometheus-client 0.12.0 Apache License 2.0 + protobuf 3.13.0 3-clause BSD license + pyasn1 0.4.8 2-clause BSD license + pyasn1-modules 0.2.8 2-clause BSD license + pyparsing 2.4.7 MIT license + pyrsistent 0.17.3 MIT license + python-dateutil 2.8.1 3-clause BSD license, Apache License 2.0 + python-json-logger 2.0.2 2-clause BSD license + requests 2.26.0 Apache License 2.0 + requests-oauthlib 1.3.0 ISC license + retrying 1.3.3 Apache License 2.0 + rsa 4.6 Apache License 2.0 + semantic-version 2.8.5 2-clause BSD license + sigtools 2.0.2 MIT license + six 1.16.0 MIT license + toml 0.10.2 MIT license + tomli 1.2.2 MIT license + urllib3 1.26.5 MIT license + webencodings 0.5.1 3-clause BSD license + websocket-client 0.57.0 3-clause BSD license diff --git a/build-aux/generate.mk b/build-aux/generate.mk index 14a3942242..cd4db91f15 100644 --- a/build-aux/generate.mk +++ b/build-aux/generate.mk @@ -60,6 +60,10 @@ generate-fast/files += $(OSS_HOME)/pkg/api/getambassador.io/v3alpha1/zz_generate generate-fast/files += 
$(OSS_HOME)/manifests/emissary/emissary-crds.yaml.in generate-fast/files += $(OSS_HOME)/manifests/emissary/emissary-emissaryns.yaml.in generate-fast/files += $(OSS_HOME)/manifests/emissary/emissary-defaultns.yaml.in +generate-fast/files += $(OSS_HOME)/manifests/emissary/emissary-emissaryns-agent.yaml.in +generate-fast/files += $(OSS_HOME)/manifests/emissary/emissary-defaultns-agent.yaml.in +generate-fast/files += $(OSS_HOME)/manifests/emissary/emissary-emissaryns-migration.yaml.in +generate-fast/files += $(OSS_HOME)/manifests/emissary/emissary-defaultns-migration.yaml.in generate-fast/files += $(OSS_HOME)/pkg/api/getambassador.io/crds.yaml generate-fast/files += $(OSS_HOME)/python/tests/integration/manifests/ambassador.yaml generate-fast/files += $(OSS_HOME)/python/tests/integration/manifests/crds.yaml @@ -386,43 +390,67 @@ $(OSS_HOME)/python/tests/integration/manifests/crds.yaml: $(OSS_HOME)/_generate.tmp/crds $(tools/fix-crds) $(tools/fix-crds) --target=internal-validator $(sort $(wildcard $$@ +# Names for all the helm-expanded.yaml files (and thence output.yaml and *.yaml.in files) helm.name.emissary-emissaryns = emissary-ingress helm.name.emissary-defaultns = emissary-ingress helm.namespace.emissary-emissaryns = emissary helm.namespace.emissary-defaultns = default +helm.name.emissary-emissaryns-agent = emissary-ingress +helm.namespace.emissary-emissaryns-agent = emissary +helm.name.emissary-defaultns-agent = emissary-ingress +helm.namespace.emissary-defaultns-agent = default +helm.name.emissary-emissaryns-migration = emissary-ingress +helm.namespace.emissary-emissaryns-migration = emissary +helm.name.emissary-defaultns-migration = emissary-ingress +helm.namespace.emissary-defaultns-migration = default + +# IF YOU'RE LOOKING FOR *.yaml: recipes, look in version-hack.mk at the +# build-aux/version-hack.stamp.mk dependencies.
+ $(OSS_HOME)/k8s-config/%/helm-expanded.yaml: \ $(OSS_HOME)/k8s-config/%/values.yaml \ $(OSS_HOME)/charts/emissary-ingress/templates $(wildcard $(OSS_HOME)/charts/emissary-ingress/templates/*.yaml) \ $(OSS_HOME)/charts/emissary-ingress/values.yaml \ FORCE helm template --namespace=$(helm.namespace.$*) --values=$(@D)/values.yaml $(or $(helm.name.$*),$*) $(OSS_HOME)/charts/emissary-ingress >$@ + $(OSS_HOME)/k8s-config/%/output.yaml: \ $(OSS_HOME)/k8s-config/%/helm-expanded.yaml \ $(OSS_HOME)/k8s-config/%/require.yaml \ $(tools/filter-yaml) $(tools/filter-yaml) $(filter %/helm-expanded.yaml,$^) $(filter %/require.yaml,$^) >$@ + $(OSS_HOME)/manifests/emissary/%.yaml.in: $(OSS_HOME)/k8s-config/%/output.yaml cp $< $@ + $(OSS_HOME)/python/tests/integration/manifests/%.yaml: $(OSS_HOME)/k8s-config/kat-%/output.yaml sed -e 's/«/{/g' -e 's/»/}/g' -e 's/♯.*//g' -e 's/- ←//g' <$< >$@ + $(OSS_HOME)/python/tests/integration/manifests/rbac_cluster_scope.yaml: $(OSS_HOME)/k8s-config/kat-rbac-multinamespace/output.yaml sed -e 's/«/{/g' -e 's/»/}/g' -e 's/♯.*//g' -e 's/- ←//g' <$< >$@ + $(OSS_HOME)/python/tests/integration/manifests/rbac_namespace_scope.yaml: $(OSS_HOME)/k8s-config/kat-rbac-singlenamespace/output.yaml sed -e 's/«/{/g' -e 's/»/}/g' -e 's/♯.*//g' -e 's/- ←//g' <$< >$@ # # Generate report on dependencies -$(OSS_HOME)/build-aux/pip-show.txt: docker/builder-base.docker - docker run --rm "$$(cat docker/builder-base.docker)" sh -c 'pip freeze --exclude-editable | cut -d= -f1 | xargs pip show' > $@ - -$(OSS_HOME)/builder/requirements.txt: %.txt: %.in FORCE - $(BUILDER) pip-compile +$(OSS_HOME)/builder/.requirements.txt.stamp: $(OSS_HOME)/builder/requirements.in docker/base-python.docker.tag.local +# The --interactive is so that stdin gets passed through; otherwise Docker closes stdin. + set -ex -o pipefail; { \ + docker run --rm --interactive "$$(cat docker/base-python.docker)" sh -c 'tar xf - && pip-compile --allow-unsafe -q >&2 && cat requirements.txt' \ + < <(bsdtar -cf - -C $(@D) requirements.in requirements.txt) \ + > $@; } +$(OSS_HOME)/builder/requirements.txt: $(OSS_HOME)/builder/%: $(OSS_HOME)/builder/.%.stamp $(tools/copy-ifchanged) + $(tools/copy-ifchanged) $< $@ .PRECIOUS: $(OSS_HOME)/builder/requirements.txt -$(OSS_HOME)/build-aux/go-version.txt: $(OSS_HOME)/builder/Dockerfile.base +$(OSS_HOME)/build-aux/pip-show.txt: docker/base-pip.docker.tag.local + docker run --rm "$$(cat docker/base-pip.docker)" sh -c 'pip freeze --exclude-editable | cut -d= -f1 | xargs pip show' > $@ + +$(OSS_HOME)/build-aux/go-version.txt: docker/base-python/Dockerfile sed -En 's,.*https://dl\.google\.com/go/go([0-9a-z.-]*)\.linux-amd64\.tar\.gz.*,\1,p' < $< > $@ -$(OSS_HOME)/build-aux/py-version.txt: $(OSS_HOME)/builder/Dockerfile.base +$(OSS_HOME)/build-aux/py-version.txt: docker/base-python/Dockerfile { grep -o 'python3=\S*' | cut -d= -f2; } < $< > $@ $(OSS_HOME)/build-aux/go1%.src.tar.gz: diff --git a/build-aux/main.mk b/build-aux/main.mk index 11168cf624..290594f179 100644 --- a/build-aux/main.mk +++ b/build-aux/main.mk @@ -1,5 +1,8 @@ include build-aux/tools.mk +# +# Utility rules + # For files that should only-maybe update when the rule runs, put ".stamp" on # the left-side of the ":", and just go ahead and update it within the rule. # @@ -11,6 +14,7 @@ include build-aux/tools.mk docker/%: docker/.%.stamp $(tools/copy-ifchanged) $(tools/copy-ifchanged) $< $@ +# Load ocibuild files into dockerd.
_ocibuild-images = base _ocibuild-images += kat-client _ocibuild-images += kat-server @@ -18,5 +22,43 @@ $(foreach img,$(_ocibuild-images),docker/.$(img).docker.stamp): docker/.%.docker docker load < $< docker inspect $$(bsdtar xfO $< manifest.json|jq -r '.[0].RepoTags[0]') --format='{{.Id}}' > $@ -docker/.base.img.tar.stamp: FORCE $(tools/crane) builder/Dockerfile - $(tools/crane) pull $(shell sed -n 's,ARG base=,,p' < builder/Dockerfile) $@ || test -e $@ +# +# Specific rules + +# For images we can either write rules for +# - `docker/.NAME.img.tar.stamp` for ocibuild-oriented images, or +# - `docker/.NAME.docker.stamp` for `docker build`-oriented images. +# +# Note that there are a few images used by the test suite that are +# defined in check.mk, rather than here. + +# base: Base OS; none of our specific stuff. Used for auxiliary test images +# that don't need Emissary-specific stuff. +docker/.base.img.tar.stamp: FORCE $(tools/crane) docker/base-python/Dockerfile + $(tools/crane) pull $(shell gawk '$$1 == "FROM" { print $$2; exit; }' < docker/base-python/Dockerfile) $@ || test -e $@ + +# base-python: Base OS, plus some Emissary-specific setup of +# low-level/expensive pieces of the Python environment. This does NOT +# include the packages installed by `requirements.txt`. +# +# At the moment, it also includes some other stuff (kubectl...), +# but including those things at such an early stage should be +# understood to be debt from a previous build system, and not +# something we're actually happy with. +# +# In the long run, this will likely always be a `docker build` rather +# than an `ocibuild`, in order to do truly base-OS-specific setup +# (`apk add`, libc-specific compilation...). +docker/.base-python.docker.stamp: FORCE docker/base-python/Dockerfile docker/base-python.docker.gen + docker/base-python.docker.gen >$@ + +# base-pip: base-python, but with requirements.txt installed. +# +# Mixed feelings about this one; it kinda wants to not be a separate +# image and just be part of the main emissary Dockerfile. But that +# would create problems for generate.mk's `pip freeze` step. Perhaps +# it will get to go away with `ocibuild`.
+docker/base-pip/requirements.txt: $(OSS_HOME)/builder/requirements.txt $(tools/copy-ifchanged) + $(tools/copy-ifchanged) $< $@ +docker/.base-pip.docker.stamp: docker/.%.docker.stamp: docker/%/Dockerfile docker/%/requirements.txt docker/base-python.docker.tag.local + docker build --build-arg=from="$$(sed -n 2p docker/base-python.docker.tag.local)" --iidfile=$@ $( ${GRN}Bootstrapping builder base image${END}\n" - @$(BUILDER) build-builder-base >$@ - docker/.base-envoy.docker.stamp: FORCE @set -e; { \ if docker image inspect $(ENVOY_DOCKER_TAG) --format='{{ .Id }}' >$@ 2>/dev/null; then \ @@ -238,14 +226,16 @@ docker/.base-envoy.docker.stamp: FORCE fi; \ echo $(ENVOY_DOCKER_TAG) >$@; \ } -docker/.$(LCNAME).docker.stamp: %/.$(LCNAME).docker.stamp: %/base-envoy.docker.tag.local %/builder-base.docker python/ambassador.version $(BUILDER_HOME)/Dockerfile $(OSS_HOME)/build-aux/py-version.txt $(tools/dsum) FORCE +docker/.$(LCNAME).docker.stamp: %/.$(LCNAME).docker.stamp: %/base.docker.tag.local %/base-envoy.docker.tag.local %/base-pip.docker.tag.local python/ambassador.version $(BUILDER_HOME)/Dockerfile $(OSS_HOME)/build-aux/py-version.txt $(tools/dsum) FORCE @printf "${CYN}==> ${GRN}Building image ${BLU}$(LCNAME)${END}\n" + @printf " ${BLU}base=$$(sed -n 2p $*/base.docker.tag.local)${END}\n" @printf " ${BLU}envoy=$$(cat $*/base-envoy.docker)${END}\n" - @printf " ${BLU}builderbase=$$(cat $*/builder-base.docker)${END}\n" + @printf " ${BLU}builderbase=$$(sed -n 2p $*/base-pip.docker.tag.local)${END}\n" { $(tools/dsum) '$(LCNAME) build' 3s \ docker build -f ${BUILDER_HOME}/Dockerfile . \ + --build-arg=base="$$(sed -n 2p $*/base.docker.tag.local)" \ --build-arg=envoy="$$(cat $*/base-envoy.docker)" \ - --build-arg=builderbase="$$(cat $*/builder-base.docker)" \ + --build-arg=builderbase="$$(sed -n 2p $*/base-pip.docker.tag.local)" \ --build-arg=py_version="$$(cat build-aux/py-version.txt)" \ --iidfile=$@; } @@ -330,35 +320,6 @@ _runner: @su -s /bin/bash $$INTERACTIVE_USER -c "$$ENTRYPOINT" .PHONY: _runner -# This target is a convenience alias for running the _bash target. -docker/shell: docker/run/_bash -.PHONY: docker/shell - -# This target runs any existing target inside of the builder base docker image. -docker/run/%: docker/builder-base.docker - docker run --net=host \ - -e INTERACTIVE_UID=$$(id -u) \ - -e INTERACTIVE_GID=$$(id -g) \ - -e INTERACTIVE_USER=$$(id -u -n) \ - -e INTERACTIVE_GROUP=$$(id -g -n) \ - -e PYTEST_ARGS="$$PYTEST_ARGS" \ - -e AMBASSADOR_DOCKER_IMAGE="$$AMBASSADOR_DOCKER_IMAGE" \ - -e DEV_KUBECONFIG="$$DEV_KUBECONFIG" \ - -v /etc/resolv.conf:/etc/resolv.conf \ - -v /var/run/docker.sock:/var/run/docker.sock \ - -v $${DEV_KUBECONFIG}:$${DEV_KUBECONFIG} \ - -v $${PWD}:$${PWD} \ - -it \ - --init \ - --cap-add=NET_ADMIN \ - --entrypoint /bin/bash \ - $$(cat docker/builder-base.docker) -c "cd $$PWD && ENTRYPOINT=make\ $* make --quiet _runner" - -# Don't try running 'make shell' from within docker. That target already tries to run a builder shell. -# Instead, quietly define 'docker/run/shell' to be an alias for 'docker/shell'. -docker/run/shell: - $(MAKE) --quiet docker/shell - setup-envoy: extract-bin-envoy pytest: push-pytest-images @@ -443,23 +404,9 @@ mypy: mypy-server { . $(OSS_HOME)/venv/bin/activate && time dmypy check python; } .PHONY: mypy -# If we're setting up within Alpine linux, make sure to pin pip and pip-tools -# to something that is still PEP517 compatible. This allows us to set _manylinux.py -# and convince pip to install prebuilt wheels. 
We do this because there's no good -# rust toolchain to build orjson within Alpine itself. $(OSS_HOME)/venv: builder/requirements.txt builder/requirements-dev.txt rm -rf $@ python3 -m venv $@ - { \ - if grep "Alpine Linux" /etc/issue &>/dev/null; then \ - $@/bin/pip3 install -U pip==20.2.4 pip-tools==5.3.1; \ - echo 'manylinux1_compatible = True' > $@/lib/python3.8/site-packages/_manylinux.py; \ - $@/bin/pip3 install orjson==3.3.1; \ - rm -f venv/lib/python3.8/site-packages/_manylinux.py; \ - else \ - $@/bin/pip3 install orjson==3.6.0; \ - fi; \ - } $@/bin/pip3 install -r builder/requirements.txt $@/bin/pip3 install -r builder/requirements-dev.txt $@/bin/pip3 install -e $(OSS_HOME)/python @@ -771,14 +718,6 @@ release/ga-check: --source-registry=$(RELEASE_REGISTRY) \ --image-name=$(LCNAME); } -clean: - @$(BUILDER) clean -.PHONY: clean - -clobber: - @$(BUILDER) clobber -.PHONY: clobber - AMBASSADOR_DOCKER_IMAGE = $(shell sed -n 2p docker/$(LCNAME).docker.push.remote 2>/dev/null) export AMBASSADOR_DOCKER_IMAGE @@ -856,13 +795,6 @@ by setting $(BLU)$$DEV_USE_IMAGEPULLSECRET$(END) to a non-empty value. The imagePullSecret will be constructed from $(BLD)$$DEV_REGISTRY$(END), $(BLU)$$DOCKER_BUILD_USERNAME$(END), and $(BLU)$$DOCKER_BUILD_PASSWORD$(END). -By default, the base builder image is (as an optimization) pulled from -$(BLU)$$BASE_REGISTRY$(END) instead of being built locally; where $(BLD)$$BASE_REGISTRY$(END) -defaults to $(BLD)docker.io/emissaryingress$(END). If that pull fails, (as it will if the -image does not yet exist), then it falls back to building the base image -locally. If $(BLD)$$BASE_REGISTRY$(END) is equal to $(BLD)$$DEV_REGISTRY$(END), then it will -proceed to push the built image back to the $(BLD)$$BASE_REGISTRY$(END). - Use $(BLD)$(MAKE) $(BLU)targets$(END) for help about available $(BLD)make$(END) targets. endef diff --git a/builder/builder.sh b/builder/builder.sh deleted file mode 100755 index 2a8f9bd915..0000000000 --- a/builder/builder.sh +++ /dev/null @@ -1,159 +0,0 @@ -#!/usr/bin/env bash - -# Choose colors carefully. If they don't work on both a black -# background and a white background, pick other colors (so white, -# yellow, and black are poor choices). -RED=$'\033[1;31m' -GRN=$'\033[1;32m' -BLU=$'\033[1;34m' -CYN=$'\033[1;36m' -END=$'\033[0m' - -set -e - -SOURCE="${BASH_SOURCE[0]}" -while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink - DIR="$( cd -P "$( dirname "$SOURCE" )" >/dev/null 2>&1 && pwd )" - SOURCE="$(readlink "$SOURCE")" - [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located -done -DIR="$( cd -P "$( dirname "$SOURCE" )" >/dev/null 2>&1 && pwd )" - -dsum() { - local exe=${DIR}/../tools/bin/dsum - if ! test -f "$exe"; then - make -C "$DIR/.." tools/bin/dsum - fi - "$exe" "$@" -} - -msg2() { - printf "${BLU} -> ${GRN}%s${END}\n" "$*" >&2 -} - -panic() { - printf 'panic: %s\n' "$*" >&2 - exit 1 -} - -# Usage: build_builder_base [--stage1-only] -# Effects: -# 1. Set the `builder_base_image` variable in the parent scope -# 2. Ensure that the `$builder_base_image` Docker image exists (pulling -# it or building it if it doesn't). -# 3. (If $DEV_REGISTRY is set AND we built the image) push the -# `$builder_base_image` Docker image. 
-# -# Description: -# -# Rebuild (and push if DEV_REGISTRY is set) the builder's base image if -# - `Dockerfile.base` changes -# - `requirements.txt` changes -# - Enough time has passed (The base only has external/third-party -# dependencies, and most of those dependencies are not pinned by -# version, so we rebuild periodically to make sure we don't fall too -# far behind and then get surprised when a rebuild is required for -# Dockerfile changes.) We have defined "enough time" as a few days. -# See the variable "build_every_n_days" below. -# -# The base theory of operation is that we generate a Docker tag name that -# is essentially the tuple -# (rounded_timestamp, hash(Dockerfile.base), hash(requirements.txt) -# then check that tag for existence/pullability using `docker run --rm -# --entrypoint=true`; and build it if it doesn't exist and can't be -# pulled. -# -# OK, now for a wee bit of complexity. We want to use `pip-compile` to -# update `requirements.txt`. Because of Python-version-conditioned -# dependencies, we really want to run it with the image's python3, not -# with the host's python3. And since we're updating `requirements.txt`, -# we don't really want the `pip install` to have already been run. So, -# we split the base image in to two stages; stage-1 is everything but -# `COPY requirements.txt` / `pip install -r requirements.txt`, and then -# stage-2 copies in `requirements.txt` and runs the `pip install`. In -# normal operation we just go ahead and build both stages. But if the -# `--stage1-only` flag is given (as it is by the `pip-compile` -# subcommand), then we only build the stage-1, and set the -# `builder_base_image` variable to that. -build_builder_base() { - local builder_base_tag_py=' -# Someone please rewrite this in portable Bash. Until then, this code -# works on Python 2.7 and 3.5+. - -import datetime, hashlib - -# Arrange these 2 variables to reduce the likelihood that build_every_n_days -# passes in the middle of a CI workflow; have it happen weekly during the -# weekend. -build_every_n_days = 7 # Periodic rebuild even if Dockerfile does not change -epoch = datetime.datetime(2020, 11, 8, 5, 0) # 1AM EDT on a Sunday - -age = int((datetime.datetime.now() - epoch).days / build_every_n_days) -age_start = epoch + datetime.timedelta(days=age*build_every_n_days) - -dockerfilehash = hashlib.sha256(open("Dockerfile.base", "rb").read()).hexdigest() -stage1 = "%sx%s-%s" % (age_start.strftime("%Y%m%d"), build_every_n_days, dockerfilehash[:16]) - -requirementshash = hashlib.sha256(open("requirements.txt", "rb").read()).hexdigest() -stage2 = "%s-%s" % (stage1, requirementshash[:16]) - -print("stage1_tag=%s" % stage1) -print("stage2_tag=%s" % stage2) -' - - local stage1_tag stage2_tag - eval "$(cd "$DIR" && python -c "$builder_base_tag_py")" # sets 'stage1_tag' and 'stage2_tag' - - local name1="${BASE_REGISTRY}/builder-base:stage1-${stage1_tag}" - local name2="${BASE_REGISTRY}/builder-base:stage2-${stage2_tag}" - - msg2 "Using stage-1 base ${BLU}${name1}${GRN}" - if ! 
(docker image inspect "$name1" || docker pull "$name1") &>/dev/null; then # skip building if the "$name1" already exists - dsum 'stage-1 build' 3s \ - docker build -f "${DIR}/Dockerfile.base" -t "${name1}" --target builderbase-stage1 "${DIR}" - if [[ "$BASE_REGISTRY" == "$DEV_REGISTRY" ]]; then - TIMEFORMAT=" (stage-1 push took %1R seconds)" - time docker push "$name1" - unset TIMEFORMAT - fi - fi - if [[ $1 = '--stage1-only' ]]; then - builder_base_image="$name1" # not local - return - fi - - msg2 "Using stage-2 base ${BLU}${name2}${GRN}" - if ! (docker image inspect "$name2" || docker pull "$name2") &>/dev/null; then # skip building if the "$name2" already exists - dsum 'stage-2 build' 3s \ - docker build --build-arg=builderbase_stage1="$name1" -f "${DIR}/Dockerfile.base" -t "${name2}" --target builderbase-stage2 "${DIR}" - if [[ "$BASE_REGISTRY" == "$DEV_REGISTRY" ]]; then - TIMEFORMAT=" (stage-2 push took %1R seconds)" - time docker push "$name2" - unset TIMEFORMAT - fi - fi - - builder_base_image="$name2" # not local -} - -cmd="${1:-help}" - -case "${cmd}" in - pip-compile) - build_builder_base --stage1-only - printf "${GRN}Running pip-compile to update ${BLU}requirements.txt${END}\n" - docker run --rm -i "$builder_base_image" sh -c 'tar xf - && pip-compile --allow-unsafe -q >&2 && cat requirements.txt' \ - < <(cd "$DIR" && tar cf - requirements.in requirements.txt) \ - > "$DIR/requirements.txt.tmp" - mv -f "$DIR/requirements.txt.tmp" "$DIR/requirements.txt" - ;; - - build-builder-base) - build_builder_base >&2 - echo "${builder_base_image}" - ;; - *) - echo "usage: builder.sh [pip-compile|build-builder-base]" - exit 1 - ;; -esac diff --git a/builder/requirements.in b/builder/requirements.in index 4103b202ad..323ca72d44 100644 --- a/builder/requirements.in +++ b/builder/requirements.in @@ -11,6 +11,7 @@ jsonpatch==1.32 jsonschema==4.2.1 k8s-proto==0.0.3 kubernetes==20.13.0 +orjson prometheus_client==0.12.0 python-json-logger==2.0.2 pyyaml==5.4.1 diff --git a/builder/requirements.txt b/builder/requirements.txt index f9b0cfbc77..a94a33940e 100644 --- a/builder/requirements.txt +++ b/builder/requirements.txt @@ -1,51 +1,119 @@ # -# This file is autogenerated by pip-compile +# This file is autogenerated by pip-compile with python 3.9 # To update, run: # # pip-compile --allow-unsafe # -attrs==19.3.0 # via clize, jsonschema -cachetools==4.1.1 # via google-auth -certifi==2020.6.20 # via kubernetes, requests -charset-normalizer==2.0.8 # via requests -click==8.0.3 # via -r requirements.in, flask -clize==4.2.1 # via -r requirements.in -docutils==0.17.1 # via clize -durationpy==0.5 # via -r requirements.in -expiringdict==1.2.1 # via -r requirements.in -flask==2.0.2 # via -r requirements.in -google-auth==1.23.0 # via kubernetes -gunicorn==20.1.0 # via -r requirements.in -idna==2.7 # via requests -importlib-resources==5.4.0 # via jsonschema -itsdangerous==2.0.1 # via flask -jinja2==3.0.3 # via flask -jsonpatch==1.32 # via -r requirements.in -jsonpointer==2.0 # via jsonpatch -jsonschema==4.2.1 # via -r requirements.in -k8s-proto==0.0.3 # via -r requirements.in -kubernetes==20.13.0 # via -r requirements.in -markupsafe==2.0.1 # via jinja2 -oauthlib==3.1.0 # via requests-oauthlib -od==1.0 # via clize -prometheus_client==0.12.0 # via -r requirements.in -protobuf==3.13.0 # via k8s-proto -pyasn1-modules==0.2.8 # via google-auth -pyasn1==0.4.8 # via pyasn1-modules, rsa -pyrsistent==0.17.3 # via jsonschema -python-dateutil==2.8.1 # via kubernetes -python-json-logger==2.0.2 # via -r requirements.in 
-pyyaml==5.4.1 # via -r requirements.in, kubernetes -requests-oauthlib==1.3.0 # via kubernetes -requests==2.26.0 # via -r requirements.in, kubernetes, requests-oauthlib -rsa==4.6 # via google-auth -semantic-version==2.8.5 # via -r requirements.in -sigtools==2.0.2 # via clize -six==1.15.0 # via clize, google-auth, kubernetes, protobuf, python-dateutil, sigtools, websocket-client -urllib3==1.26.5 # via -r requirements.in, kubernetes, requests -websocket-client==0.57.0 # via kubernetes -werkzeug==2.0.2 # via flask -zipp==3.6.0 # via importlib-resources +attrs==19.3.0 + # via + # clize + # jsonschema +cachetools==4.1.1 + # via google-auth +certifi==2020.6.20 + # via + # kubernetes + # requests +charset-normalizer==2.0.8 + # via requests +click==8.0.3 + # via + # -r requirements.in + # flask +clize==4.2.1 + # via -r requirements.in +docutils==0.17.1 + # via clize +durationpy==0.5 + # via -r requirements.in +expiringdict==1.2.1 + # via -r requirements.in +flask==2.0.2 + # via -r requirements.in +google-auth==1.23.0 + # via kubernetes +gunicorn==20.1.0 + # via -r requirements.in +idna==2.7 + # via requests +itsdangerous==2.0.1 + # via flask +jinja2==3.0.3 + # via flask +jsonpatch==1.32 + # via -r requirements.in +jsonpointer==2.0 + # via jsonpatch +jsonschema==4.2.1 + # via -r requirements.in +k8s-proto==0.0.3 + # via -r requirements.in +kubernetes==20.13.0 + # via -r requirements.in +markupsafe==2.0.1 + # via jinja2 +oauthlib==3.1.0 + # via requests-oauthlib +od==1.0 + # via clize +orjson==3.6.6 + # via -r requirements.in +prometheus_client==0.12.0 + # via -r requirements.in +protobuf==3.13.0 + # via k8s-proto +pyasn1==0.4.8 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.2.8 + # via google-auth +pyrsistent==0.17.3 + # via jsonschema +python-dateutil==2.8.1 + # via kubernetes +python-json-logger==2.0.2 + # via -r requirements.in +pyyaml==5.4.1 + # via + # -r requirements.in + # kubernetes +requests==2.26.0 + # via + # -r requirements.in + # kubernetes + # requests-oauthlib +requests-oauthlib==1.3.0 + # via kubernetes +rsa==4.6 + # via google-auth +semantic-version==2.8.5 + # via -r requirements.in +sigtools==2.0.2 + # via clize +six==1.16.0 + # via + # clize + # google-auth + # kubernetes + # protobuf + # python-dateutil + # sigtools + # websocket-client +urllib3==1.26.5 + # via + # -r requirements.in + # kubernetes + # requests +websocket-client==0.57.0 + # via kubernetes +werkzeug==2.0.2 + # via flask # The following packages are considered to be unsafe in a requirements file: -setuptools==50.3.2 # via google-auth, gunicorn, kubernetes, protobuf +setuptools==50.3.2 + # via + # google-auth + # gunicorn + # kubernetes + # protobuf diff --git a/cmd/entrypoint/consul.go b/cmd/entrypoint/consul.go index bafda4ae25..21f83a632d 100644 --- a/cmd/entrypoint/consul.go +++ b/cmd/entrypoint/consul.go @@ -19,7 +19,7 @@ type consulMapping struct { Resolver string } -func ReconcileConsul(ctx context.Context, consul *consul, s *snapshotTypes.KubernetesSnapshot) error { +func ReconcileConsul(ctx context.Context, consulWatcher *consulWatcher, s *snapshotTypes.KubernetesSnapshot) error { var mappings []consulMapping for _, list := range s.Annotations { for _, a := range list { @@ -55,11 +55,11 @@ func ReconcileConsul(ctx context.Context, consul *consul, s *snapshotTypes.Kuber } } - return consul.reconcile(ctx, s.ConsulResolvers, mappings) + return consulWatcher.reconcile(ctx, s.ConsulResolvers, mappings) } -type consul struct { - watcher Watcher +type consulWatcher struct { + watchFunc watchConsulFunc 
resolvers map[string]*resolver firstReconcileHasHappened bool @@ -77,23 +77,17 @@ type consul struct { bootstrapped bool } -func newConsul(ctx context.Context, watcher Watcher) *consul { - result := &consul{ - watcher: watcher, +func newConsulWatcher(watchFunc watchConsulFunc) *consulWatcher { + return &consulWatcher{ + watchFunc: watchFunc, resolvers: make(map[string]*resolver), coalescedDirty: make(chan struct{}), endpointsCh: make(chan consulwatch.Endpoints), endpoints: make(map[string]consulwatch.Endpoints), } - go func() { - if err := result.run(ctx); err != nil { - panic(err) // TODO: Find a better way of reporting errors from goroutines. - } - }() - return result } -func (c *consul) run(ctx context.Context) error { +func (c *consulWatcher) run(ctx context.Context) error { dirty := false for { if dirty { @@ -118,17 +112,17 @@ func (c *consul) run(ctx context.Context) error { } } -func (c *consul) updateEndpoints(endpoints consulwatch.Endpoints) { +func (c *consulWatcher) updateEndpoints(endpoints consulwatch.Endpoints) { c.mutex.Lock() defer c.mutex.Unlock() c.endpoints[endpoints.Service] = endpoints } -func (c *consul) changed() chan struct{} { +func (c *consulWatcher) changed() chan struct{} { return c.coalescedDirty } -func (c *consul) update(snap *snapshotTypes.ConsulSnapshot) { +func (c *consulWatcher) update(snap *snapshotTypes.ConsulSnapshot) { c.mutex.Lock() defer c.mutex.Unlock() snap.Endpoints = make(map[string]consulwatch.Endpoints, len(c.endpoints)) @@ -137,7 +131,7 @@ func (c *consul) update(snap *snapshotTypes.ConsulSnapshot) { } } -func (c *consul) isBootstrapped() bool { +func (c *consulWatcher) isBootstrapped() bool { if !c.firstReconcileHasHappened { return false } @@ -161,7 +155,7 @@ func (c *consul) isBootstrapped() bool { } // Stop all service watches. -func (c *consul) cleanup(ctx context.Context) error { +func (c *consulWatcher) cleanup(ctx context.Context) error { // XXX: do we care about a clean shutdown /*go func() { <-ctx.Done() @@ -173,7 +167,7 @@ func (c *consul) cleanup(ctx context.Context) error { // Start and stop consul service watches as needed in order to match the supplied set of resolvers // and mappings. -func (c *consul) reconcile(ctx context.Context, resolvers []*amb.ConsulResolver, mappings []consulMapping) error { +func (c *consulWatcher) reconcile(ctx context.Context, resolvers []*amb.ConsulResolver, mappings []consulMapping) error { // ==First we compute resolvers and their related mappings without actualy changing anything.== resolversByName := make(map[string]*amb.ConsulResolver) for _, cr := range resolvers { @@ -239,7 +233,7 @@ func (c *consul) reconcile(ctx context.Context, resolvers []*amb.ConsulResolver, // Finally we reconcile each mapping. for rname, mappings := range mappingsByResolver { res := c.resolvers[rname] - if err := res.reconcile(ctx, c.watcher, mappings, c.endpointsCh); err != nil { + if err := res.reconcile(ctx, c.watchFunc, mappings, c.endpointsCh); err != nil { return err } } @@ -276,7 +270,7 @@ func (r *resolver) deleted() { } } -func (r *resolver) reconcile(ctx context.Context, watcher Watcher, mappings []consulMapping, endpoints chan consulwatch.Endpoints) error { +func (r *resolver) reconcile(ctx context.Context, watchFunc watchConsulFunc, mappings []consulMapping, endpoints chan consulwatch.Endpoints) error { servicesByName := make(map[string]bool) for _, m := range mappings { // XXX: how to parse this? 
@@ -285,7 +279,7 @@ func (r *resolver) reconcile(ctx context.Context, watcher Watcher, mappings []co w, ok := r.watches[svc] if !ok { var err error - w, err = watcher.Watch(ctx, r.resolver, svc, endpoints) + w, err = watchFunc(ctx, r.resolver, svc, endpoints) if err != nil { return err } @@ -303,17 +297,13 @@ func (r *resolver) reconcile(ctx context.Context, watcher Watcher, mappings []co return nil } -type Watcher interface { - Watch(ctx context.Context, resolver *amb.ConsulResolver, svc string, endpoints chan consulwatch.Endpoints) (Stopper, error) -} +type watchConsulFunc func(ctx context.Context, resolver *amb.ConsulResolver, svc string, endpoints chan consulwatch.Endpoints) (Stopper, error) type Stopper interface { Stop() } -type consulWatcher struct{} - -func (cw *consulWatcher) Watch( +func watchConsul( ctx context.Context, resolver *amb.ConsulResolver, svc string, diff --git a/cmd/entrypoint/consul_test.go b/cmd/entrypoint/consul_test.go index d5b2ce16d3..bae03ebbdd 100644 --- a/cmd/entrypoint/consul_test.go +++ b/cmd/entrypoint/consul_test.go @@ -12,6 +12,7 @@ import ( "github.com/datawire/ambassador/v2/pkg/consulwatch" "github.com/datawire/ambassador/v2/pkg/kates" snapshotTypes "github.com/datawire/ambassador/v2/pkg/snapshot/v1" + "github.com/datawire/dlib/dgroup" "github.com/datawire/dlib/dlog" ) @@ -121,8 +122,14 @@ func TestBootstrap(t *testing.T) { assert.True(t, c.isBootstrapped()) } -func setup(t *testing.T) (ctx context.Context, resolvers []*amb.ConsulResolver, mappings []consulMapping, c *consul, tw *testWatcher) { - ctx = dlog.NewTestContext(t, false) +func setup(t *testing.T) (ctx context.Context, resolvers []*amb.ConsulResolver, mappings []consulMapping, c *consulWatcher, tw *testWatcher) { + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(dlog.NewTestContext(t, false)) + grp := dgroup.NewGroup(ctx, dgroup.GroupConfig{}) + t.Cleanup(func() { + cancel() + assert.NoError(t, grp.Wait()) + }) parent := &kates.Unstructured{ Object: map[string]interface{}{ @@ -157,7 +164,8 @@ func setup(t *testing.T) (ctx context.Context, resolvers []*amb.ConsulResolver, assert.Equal(t, 4, len(mappings)) tw = &testWatcher{t: t, events: make(map[string]bool)} - c = newConsul(ctx, tw) + c = newConsulWatcher(tw.Watch) + grp.Go("consul", c.run) tw.Assert() return diff --git a/cmd/entrypoint/entrypoint.go b/cmd/entrypoint/entrypoint.go index c23b775968..8d95ae1594 100644 --- a/cmd/entrypoint/entrypoint.go +++ b/cmd/entrypoint/entrypoint.go @@ -189,7 +189,7 @@ func Main(ctx context.Context, Version string, args ...string) error { group.Go("watcher", func(ctx context.Context) error { // We need to pass the AmbassadorWatcher to this (Kubernetes/Consul) watcher, so // that it can tell the AmbassadorWatcher when snapshots are posted. - return watcher(ctx, ambwatch, snapshot, fastpathCh, clusterID, Version) + return WatchAllTheThings(ctx, ambwatch, snapshot, fastpathCh, clusterID, Version) }) } diff --git a/cmd/entrypoint/fswatcher.go b/cmd/entrypoint/fswatcher.go index ec8eb1a7ad..d0e00f0c24 100644 --- a/cmd/entrypoint/fswatcher.go +++ b/cmd/entrypoint/fswatcher.go @@ -118,9 +118,6 @@ func NewFSWatcher(ctx context.Context) (*FSWatcher, error) { // Start with the default error handler... fsw.handleError = fsw.defaultErrorHandler - // ...and then go watch for events. 
- go fsw.watchForEvents(ctx) - return fsw, nil } @@ -170,7 +167,7 @@ func (fsw *FSWatcher) defaultErrorHandler(ctx context.Context, err error) { } // Watch for events, and handle them. -func (fsw *FSWatcher) watchForEvents(ctx context.Context) { +func (fsw *FSWatcher) Run(ctx context.Context) { for { select { case event := <-fsw.FSW.Events: diff --git a/cmd/entrypoint/fswatcher_test.go b/cmd/entrypoint/fswatcher_test.go index 0bbaafbbb2..ae94819655 100644 --- a/cmd/entrypoint/fswatcher_test.go +++ b/cmd/entrypoint/fswatcher_test.go @@ -15,6 +15,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/datawire/ambassador/v2/cmd/entrypoint" + "github.com/datawire/dlib/dgroup" "github.com/datawire/dlib/dlog" ) @@ -30,7 +31,12 @@ type fswMetadata struct { } func newMetadata(t *testing.T) (context.Context, *fswMetadata, error) { - ctx := dlog.NewTestContext(t, false) + ctx, cancel := context.WithCancel(dlog.NewTestContext(t, false)) + grp := dgroup.NewGroup(ctx, dgroup.GroupConfig{}) + t.Cleanup(func() { + cancel() + assert.NoError(t, grp.Wait()) + }) m := &fswMetadata{t: t} m.bootstrapped = make(map[string]bool) m.updates = make(map[string]int) @@ -46,11 +52,14 @@ func newMetadata(t *testing.T) (context.Context, *fswMetadata, error) { } m.fsw, err = entrypoint.NewFSWatcher(ctx) - if err != nil { t.Errorf("could not instantiate FSWatcher: %s", err) return nil, nil, err } + grp.Go("watch", func(ctx context.Context) error { + m.fsw.Run(ctx) + return nil + }) m.fsw.SetErrorHandler(m.errorHandler) diff --git a/cmd/entrypoint/testutil_fake_queue_test.go b/cmd/entrypoint/internal/testqueue/queue.go similarity index 76% rename from cmd/entrypoint/testutil_fake_queue_test.go rename to cmd/entrypoint/internal/testqueue/queue.go index 676a600bce..a304a1928b 100644 --- a/cmd/entrypoint/testutil_fake_queue_test.go +++ b/cmd/entrypoint/internal/testqueue/queue.go @@ -1,6 +1,7 @@ -package entrypoint +package testqueue import ( + "context" "encoding/json" "fmt" "strings" @@ -10,6 +11,7 @@ import ( "github.com/stretchr/testify/assert" + "github.com/datawire/dlib/dgroup" "github.com/datawire/dlib/dlog" ) @@ -17,7 +19,6 @@ import ( // operation (the Get() method) takes a predicate that allows it to skip past queue entries until it // finds one that satisfies the specified predicate. type Queue struct { - T *testing.T timeout time.Duration cond *sync.Cond entries []interface{} @@ -27,29 +28,34 @@ type Queue struct { // NewQueue constructs a new queue with the supplied timeout. func NewQueue(t *testing.T, timeout time.Duration) *Queue { q := &Queue{ - T: t, timeout: timeout, cond: sync.NewCond(&sync.Mutex{}), } - ctx := dlog.NewTestContext(t, false) + ctx, cancel := context.WithCancel(dlog.NewTestContext(t, true)) + grp := dgroup.NewGroup(ctx, dgroup.GroupConfig{}) + t.Cleanup(func() { + cancel() + assert.NoError(t, grp.Wait()) + }) // Broadcast on Queue.cond every three seconds so that anyone waiting on the condition has a // chance to timeout. (Go doesn't support timed wait on conditions.) - go func() { + grp.Go("ticker", func(ctx context.Context) error { ticker := time.NewTicker(3 * time.Second) for { select { case <-ticker.C: q.cond.Broadcast() case <-ctx.Done(): - return + return nil } } - }() + }) return q } // Add an entry to the queue. 
-func (q *Queue) Add(obj interface{}) { +func (q *Queue) Add(t *testing.T, obj interface{}) { + t.Helper() q.cond.L.Lock() defer q.cond.L.Unlock() q.entries = append(q.entries, obj) @@ -57,8 +63,8 @@ func (q *Queue) Add(obj interface{}) { } // Get will return the next entry that satisfies the supplied predicate. -func (q *Queue) Get(predicate func(interface{}) bool) (interface{}, error) { - q.T.Helper() +func (q *Queue) Get(t *testing.T, predicate func(interface{}) bool) (interface{}, error) { + t.Helper() start := time.Now() q.cond.L.Lock() defer q.cond.L.Unlock() @@ -89,17 +95,17 @@ func (q *Queue) Get(predicate func(interface{}) bool) (interface{}, error) { msg.WriteString(fmt.Sprintf("\n--- Queue Entry[%d] %s---\n%s\n", idx, extra, string(bytes))) } - q.T.Fatal(fmt.Sprintf("Get timed out!\n%s", msg)) + t.Fatalf("Get timed out!\n%s", msg) } q.cond.Wait() } } // AssertEmpty will check that the queue remains empty for the supplied duration. -func (q *Queue) AssertEmpty(timeout time.Duration, msg string) { - q.T.Helper() +func (q *Queue) AssertEmpty(t *testing.T, timeout time.Duration, msg string) { + t.Helper() time.Sleep(timeout) q.cond.L.Lock() defer q.cond.L.Unlock() - assert.Empty(q.T, q.entries, msg) + assert.Empty(t, q.entries, msg) } diff --git a/cmd/entrypoint/testutil_fake_queue_test_test.go b/cmd/entrypoint/internal/testqueue/queue_test.go similarity index 62% rename from cmd/entrypoint/testutil_fake_queue_test_test.go rename to cmd/entrypoint/internal/testqueue/queue_test.go index f25c430493..9e6c152436 100644 --- a/cmd/entrypoint/testutil_fake_queue_test_test.go +++ b/cmd/entrypoint/internal/testqueue/queue_test.go @@ -1,24 +1,24 @@ -package entrypoint_test +package testqueue_test import ( "testing" "time" - "github.com/datawire/ambassador/v2/cmd/entrypoint" + "github.com/datawire/ambassador/v2/cmd/entrypoint/internal/testqueue" "github.com/stretchr/testify/require" ) func TestFakeQueueGet(t *testing.T) { - q := entrypoint.NewQueue(t, 10*time.Second) + q := testqueue.NewQueue(t, 10*time.Second) go func() { for count := 0; count < 10; count++ { - q.Add(count) + q.Add(t, count) } }() for count := 0; count < 10; count++ { - obj, err := q.Get(func(obj interface{}) bool { + obj, err := q.Get(t, func(obj interface{}) bool { return true }) require.NoError(t, err) @@ -27,16 +27,16 @@ func TestFakeQueueGet(t *testing.T) { } func TestFakeQueueSkip(t *testing.T) { - q := entrypoint.NewQueue(t, 10*time.Second) + q := testqueue.NewQueue(t, 10*time.Second) go func() { for count := 0; count < 10; count++ { - q.Add(count) + q.Add(t, count) } }() for count := 0; count < 10; count += 2 { - obj, err := q.Get(func(obj interface{}) bool { + obj, err := q.Get(t, func(obj interface{}) bool { i := obj.(int) return (i % 2) == 0 }) diff --git a/cmd/entrypoint/istiocert.go b/cmd/entrypoint/istiocert.go index 38f46efb1d..198bd347b3 100644 --- a/cmd/entrypoint/istiocert.go +++ b/cmd/entrypoint/istiocert.go @@ -60,6 +60,7 @@ func (src *istioCertSource) Watch(ctx context.Context) (IstioCertWatcher, error) if err != nil { return nil, err } + go fsw.Run(ctx) // ...then tell the FSWatcher to watch the Istio cert directory, // and give it a handler function that'll update the IstioCert diff --git a/cmd/entrypoint/testutil_fake_hello_test.go b/cmd/entrypoint/testutil_fake_hello_test.go index 76eec8897d..186cb8369f 100644 --- a/cmd/entrypoint/testutil_fake_hello_test.go +++ b/cmd/entrypoint/testutil_fake_hello_test.go @@ -142,25 +142,27 @@ func FindCluster(envoyConfig 
*v3bootstrap.Bootstrap, predicate func(*v3cluster.C return nil } -func deltaSummary(snap *snapshot.Snapshot) []string { +func deltaSummary(t *testing.T, snaps ...*snapshot.Snapshot) []string { summary := []string{} var typestr string - for _, delta := range snap.Deltas { - switch delta.DeltaType { - case kates.ObjectAdd: - typestr = "add" - case kates.ObjectUpdate: - typestr = "update" - case kates.ObjectDelete: - typestr = "delete" - default: - // Bug because the programmer needs to add another case here. - panic(fmt.Errorf("missing case for DeltaType enum: %#v", delta)) + for _, snap := range snaps { + for _, delta := range snap.Deltas { + switch delta.DeltaType { + case kates.ObjectAdd: + typestr = "add" + case kates.ObjectUpdate: + typestr = "update" + case kates.ObjectDelete: + typestr = "delete" + default: + // Bug because the programmer needs to add another case here. + t.Fatalf("missing case for DeltaType enum: %#v", delta) + } + + summary = append(summary, fmt.Sprintf("%s %s %s", typestr, delta.Kind, delta.Name)) } - - summary = append(summary, fmt.Sprintf("%s %s %s", typestr, delta.Kind, delta.Name)) } sort.Strings(summary) @@ -168,6 +170,27 @@ func deltaSummary(snap *snapshot.Snapshot) []string { return summary } +// getSnapshots is like f.GetSnapshot, but returns the list of every snapshot evaluated (rather than +// discarding the snapshots from before predicate returns true). This is particularly important if +// you're looking at deltas; you don't want to discard any deltas just because two snapshots didn't +// get coalesced. +func getSnapshots(f *entrypoint.Fake, predicate func(*snapshot.Snapshot) bool) ([]*snapshot.Snapshot, error) { + var ret []*snapshot.Snapshot + for { + snap, err := f.GetSnapshot(func(_ *snapshot.Snapshot) bool { + return true + }) + if err != nil { + return nil, err + } + ret = append(ret, snap) + if predicate(snap) { + break + } + } + return ret, nil +} + // This test will cover how to exercise the consul portion of the control plane. In principal it is // the same as supplying kubernetes resources, however it uses the ConsulEndpoint() method to // provide consul data. @@ -242,7 +265,7 @@ func TestFakeHelloConsul(t *testing.T) { assert.Equal(t, "consul-server.default:8500", snap.Kubernetes.ConsulResolvers[0].Spec.Address) // Check that our deltas are what we expect. - assert.Equal(t, []string{"add ConsulResolver consul-dc1", "add Mapping hello", "add TCPMapping hello-tcp"}, deltaSummary(snap)) + assert.Equal(t, []string{"add ConsulResolver consul-dc1", "add Mapping hello", "add TCPMapping hello-tcp"}, deltaSummary(t, snap)) // Create a predicate that will recognize the cluster we care about. The surjection from // Mappings to clusters is a bit opaque, so we just look for a cluster that contains the name @@ -309,7 +332,7 @@ spec: require.NoError(t, err) // ...with one delta, namely the ConsulResolver... - assert.Equal(t, []string{"update ConsulResolver consul-dc1"}, deltaSummary(snap)) + assert.Equal(t, []string{"update ConsulResolver consul-dc1"}, deltaSummary(t, snap)) // ...where the mapping name hasn't changed... assert.Equal(t, "hello", snap.Kubernetes.Mappings[0].Name) @@ -337,14 +360,16 @@ spec: f.Flush() // Repeat all the checks. 
- snap, err = f.GetSnapshot(func(snap *snapshot.Snapshot) bool { + snaps, err := getSnapshots(f, func(snap *snapshot.Snapshot) bool { return (len(snap.Kubernetes.Mappings) > 0) && (len(snap.Kubernetes.TCPMappings) > 0) && (len(snap.Kubernetes.ConsulResolvers) > 0) }) require.NoError(t, err) + require.Greater(t, len(snaps), 0) + snap = snaps[len(snaps)-1] // Two deltas here since we've deleted and re-added without a check in between. // (They appear out of order here because of string sorting. Don't panic.) - assert.Equal(t, []string{"add ConsulResolver consul-dc1", "delete ConsulResolver consul-dc1"}, deltaSummary(snap)) + assert.Equal(t, []string{"add ConsulResolver consul-dc1", "delete ConsulResolver consul-dc1"}, deltaSummary(t, snaps...)) // ...one mapping... assert.Equal(t, "hello", snap.Kubernetes.Mappings[0].Name) diff --git a/cmd/entrypoint/testutil_fake_test.go b/cmd/entrypoint/testutil_fake_test.go index a15d88ae5b..45777c41c0 100644 --- a/cmd/entrypoint/testutil_fake_test.go +++ b/cmd/entrypoint/testutil_fake_test.go @@ -12,6 +12,7 @@ import ( "time" "github.com/datawire/ambassador/v2/cmd/ambex" + "github.com/datawire/ambassador/v2/cmd/entrypoint/internal/testqueue" v3bootstrap "github.com/datawire/ambassador/v2/pkg/api/envoy/config/bootstrap/v3" amb "github.com/datawire/ambassador/v2/pkg/api/getambassador.io/v3alpha1" "github.com/datawire/ambassador/v2/pkg/consulwatch" @@ -69,9 +70,9 @@ type Fake struct { // This holds the current snapshot. currentSnapshot *atomic.Value - fastpath *Queue // All fastpath snapshots that have been produced. - snapshots *Queue // All snapshots that have been produced. - envoyConfigs *Queue // All envoyConfigs that have been produced. + fastpath *testqueue.Queue // All fastpath snapshots that have been produced. + snapshots *testqueue.Queue // All snapshots that have been produced. + envoyConfigs *testqueue.Queue // All envoyConfigs that have been produced. // This is used to make Teardown idempotent. 
teardownOnce sync.Once @@ -113,9 +114,9 @@ func NewFake(t *testing.T, config FakeConfig) *Fake { currentSnapshot: &atomic.Value{}, - fastpath: NewQueue(t, config.Timeout), - snapshots: NewQueue(t, config.Timeout), - envoyConfigs: NewQueue(t, config.Timeout), + fastpath: testqueue.NewQueue(t, config.Timeout), + snapshots: testqueue.NewQueue(t, config.Timeout), + envoyConfigs: testqueue.NewQueue(t, config.Timeout), } fake.k8sSource = &fakeK8sSource{fake: fake, store: k8sStore} @@ -202,12 +203,12 @@ func (f *Fake) runWatcher(ctx context.Context) error { interestingTypes := GetInterestingTypes(ctx, nil) queries := GetQueries(ctx, interestingTypes) - return watcherLoop( + return watchAllTheThingsInternal( ctx, f.currentSnapshot, // encoded f.k8sSource, queries, - f.watcher, // consulWatcher + f.watcher.Watch, // watchConsulFunc f.istioCertSource, f.notifySnapshot, f.notifyFastpath, @@ -216,12 +217,12 @@ func (f *Fake) runWatcher(ctx context.Context) error { } func (f *Fake) notifyFastpath(ctx context.Context, fastpath *ambex.FastpathSnapshot) { - f.fastpath.Add(fastpath) + f.fastpath.Add(f.T, fastpath) } func (f *Fake) GetEndpoints(predicate func(*ambex.Endpoints) bool) (*ambex.Endpoints, error) { f.T.Helper() - untyped, err := f.fastpath.Get(func(obj interface{}) bool { + untyped, err := f.fastpath.Get(f.T, func(obj interface{}) bool { fastpath := obj.(*ambex.FastpathSnapshot) return predicate(fastpath.Endpoints) }) @@ -233,7 +234,7 @@ func (f *Fake) GetEndpoints(predicate func(*ambex.Endpoints) bool) (*ambex.Endpo func (f *Fake) AssertEndpointsEmpty(timeout time.Duration) { f.T.Helper() - f.fastpath.AssertEmpty(timeout, "endpoints queue not empty") + f.fastpath.AssertEmpty(f.T, timeout, "endpoints queue not empty") } type SnapshotEntry struct { @@ -241,6 +242,14 @@ type SnapshotEntry struct { Snapshot *snapshot.Snapshot } +func (entry SnapshotEntry) String() string { + snapshot := "nil" + if entry.Snapshot != nil { + snapshot = fmt.Sprintf("&%#v", *entry.Snapshot) + } + return fmt.Sprintf("{Disposition: %v, Snapshot: %s}", entry.Disposition, snapshot) +} + // We pass this into the watcher loop to get notified when a snapshot is produced. func (f *Fake) notifySnapshot(ctx context.Context, disp SnapshotDisposition, snapJSON []byte) error { if disp == SnapshotReady && f.config.EnvoyConfig { @@ -256,14 +265,14 @@ func (f *Fake) notifySnapshot(ctx context.Context, disp SnapshotDisposition, sna f.T.Fatalf("error decoding snapshot: %+v", err) } - f.snapshots.Add(SnapshotEntry{disp, snap}) + f.snapshots.Add(f.T, SnapshotEntry{disp, snap}) return nil } // GetSnapshotEntry will return the next SnapshotEntry that satisfies the supplied predicate. func (f *Fake) GetSnapshotEntry(predicate func(SnapshotEntry) bool) (SnapshotEntry, error) { f.T.Helper() - untyped, err := f.snapshots.Get(func(obj interface{}) bool { + untyped, err := f.snapshots.Get(f.T, func(obj interface{}) bool { entry := obj.(SnapshotEntry) return predicate(entry) }) @@ -291,13 +300,13 @@ func (f *Fake) appendEnvoyConfig(ctx context.Context) { f.T.Fatalf("error decoding envoy.json after sending snapshot to python: %+v", err) } bs := msg.(*v3bootstrap.Bootstrap) - f.envoyConfigs.Add(bs) + f.envoyConfigs.Add(f.T, bs) } // GetEnvoyConfig will return the next envoy config that satisfies the supplied predicate. 
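The String method added to SnapshotEntry nil-checks the Snapshot pointer before formatting it with %#v, so dumping a queue of entries in a failing test never panics on an entry whose snapshot is absent. A self-contained sketch of the same pattern with stand-in types (the real SnapshotDisposition has its own Stringer, added in the next file):

```go
package main

import "fmt"

type Snapshot struct{ Mappings int }

type Entry struct {
	Disposition int
	Snapshot    *Snapshot
}

// String guards the pointer before dereferencing it for %#v, the same trick
// SnapshotEntry.String uses in the patch.
func (e Entry) String() string {
	snap := "nil"
	if e.Snapshot != nil {
		snap = fmt.Sprintf("&%#v", *e.Snapshot)
	}
	return fmt.Sprintf("{Disposition: %v, Snapshot: %s}", e.Disposition, snap)
}

func main() {
	fmt.Println(Entry{Disposition: 3, Snapshot: &Snapshot{Mappings: 1}})
	fmt.Println(Entry{Disposition: 0}) // no panic on the nil snapshot
}
```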
func (f *Fake) GetEnvoyConfig(predicate func(*v3bootstrap.Bootstrap) bool) (*v3bootstrap.Bootstrap, error) { f.T.Helper() - untyped, err := f.envoyConfigs.Get(func(obj interface{}) bool { + untyped, err := f.envoyConfigs.Get(f.T, func(obj interface{}) bool { return predicate(obj.(*v3bootstrap.Bootstrap)) }) if err != nil { diff --git a/cmd/entrypoint/watcher.go b/cmd/entrypoint/watcher.go index d3f0d6ddad..5e74d32538 100644 --- a/cmd/entrypoint/watcher.go +++ b/cmd/entrypoint/watcher.go @@ -17,10 +17,11 @@ import ( "github.com/datawire/ambassador/v2/pkg/gateway" "github.com/datawire/ambassador/v2/pkg/kates" "github.com/datawire/ambassador/v2/pkg/snapshot/v1" + "github.com/datawire/dlib/dgroup" "github.com/datawire/dlib/dlog" ) -func watcher( +func WatchAllTheThings( ctx context.Context, ambwatch *acp.AmbassadorWatcher, encoded *atomic.Value, @@ -59,15 +60,15 @@ func watcher( } k8sSrc := newK8sSource(client) - consulSrc := &consulWatcher{} + consulSrc := watchConsul istioCertSrc := newIstioCertSource() - return watcherLoop( + return watchAllTheThingsInternal( ctx, encoded, k8sSrc, queries, - consulSrc, // consulWatcher + consulSrc, // watchConsulFunc istioCertSrc, notify, // snapshotProcessor fastpathUpdate, // fastpathProcessor @@ -89,6 +90,7 @@ func getAmbassadorMeta(ambassadorID string, clusterID string, version string, cl } type SnapshotProcessor func(context.Context, SnapshotDisposition, []byte) error + type SnapshotDisposition int const ( @@ -104,6 +106,19 @@ const ( SnapshotReady ) +func (disposition SnapshotDisposition) String() string { + ret, ok := map[SnapshotDisposition]string{ + SnapshotIncomplete: "SnapshotIncomplete", + SnapshotDefer: "SnapshotDefer", + SnapshotDrop: "SnapshotDrop", + SnapshotReady: "SnapshotReady", + }[disposition] + if !ok { + return fmt.Sprintf("%[1]T(%[1]d)", disposition) + } + return ret +} + type FastpathProcessor func(context.Context, *ambex.FastpathSnapshot) // watcher is _the_ thing that watches all the different kinds of Ambassador configuration @@ -151,31 +166,31 @@ type FastpathProcessor func(context.Context, *ambex.FastpathSnapshot) // // 4. If you don't fully understand everything above, _do not touch this function without // guidance_. -func watcherLoop( +func watchAllTheThingsInternal( ctx context.Context, encoded *atomic.Value, k8sSrc K8sSource, queries []kates.Query, - consulWatcher Watcher, + watchConsulFunc watchConsulFunc, istioCertSrc IstioCertSource, snapshotProcessor SnapshotProcessor, fastpathProcessor FastpathProcessor, ambassadorMeta *snapshot.AmbassadorMetaInfo, ) error { - // Ambassador has three sources of inputs: kubernetes, consul, and the filesystem. The job of - // the watcherLoop is to read updates from all three of these sources, assemble them into a - // single coherent configuration, and pass them along to other parts of ambassador for - // processing. + // Ambassador has three sources of inputs: kubernetes, consul, and the filesystem. The job + // of the watchAllTheThingsInternal loop is to read updates from all three of these sources, + // assemble them into a single coherent configuration, and pass them along to other parts of + // ambassador for processing. - // The watcherLoop must decide what information is relevant to solicit from each source. This is - // decided a bit differently for each source. + // The watchAllTheThingsInternal loop must decide what information is relevant to solicit + // from each source. This is decided a bit differently for each source. 
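The hunks that follow replace a bare goroutine, whose only error channel was panic(err), with workers registered on a dgroup group, so a failure in any worker cancels the others and surfaces through watchAllTheThingsInternal's return value. Here is a minimal sketch of that supervision pattern, using only the dgroup calls that appear in this patch (NewGroup, Go, Wait); the worker bodies are invented for illustration.

```go
package main

import (
	"context"
	"fmt"

	"github.com/datawire/dlib/dgroup"
)

func main() {
	grp := dgroup.NewGroup(context.Background(), dgroup.GroupConfig{})

	// Each worker gets a name and a context; returning an error (instead of
	// panicking, as the old notify goroutine did) shuts down the whole group.
	grp.Go("worker", func(ctx context.Context) error {
		return fmt.Errorf("something went wrong")
	})
	grp.Go("loop", func(ctx context.Context) error {
		<-ctx.Done() // unblocked when "worker" fails
		return nil
	})

	// Wait returns the failure, so callers see errors that used to be lost
	// inside `go func() { ... panic(err) ... }()`.
	fmt.Println(grp.Wait())
}
```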
// // For kubernetes the set of subscriptions is basically hardcoded to the set of resources - defined in interesting_types.go, this is filtered down at boot based on RBAC limitations. The - filtered list is used to construct the queries that are passed into this function, and that - set of queries remains fixed for the lifetime of the loop, i.e. the lifetime of the - abmassador process (unless we are testing, in which case we may run the watcherLoop more than - once in a single process). + // defined in interesting_types.go, this is filtered down at boot based on RBAC + // limitations. The filtered list is used to construct the queries that are passed into this + // function, and that set of queries remains fixed for the lifetime of the loop, i.e. the + // lifetime of the ambassador process (unless we are testing, in which case we may run the + // watchAllTheThingsInternal loop more than once in a single process). // // For the consul source we derive the set of resources to watch based on the configuration in // kubernetes, i.e. we watch the services defined in Mappings that are configured to use a @@ -184,6 +199,8 @@ func watcherLoop( // // The filesystem datasource is for istio secrets. XXX fill in more + grp := dgroup.NewGroup(ctx, dgroup.GroupConfig{}) + // Each time the wathcerLoop wakes up, it assembles updates from whatever source woke it up into // its view of the world. It then determines if enough information has been assembled to // consider ambassador "booted" and if so passes the updated view along to its output (the @@ -196,7 +213,8 @@ func watcherLoop( if err != nil { return err } - consul := newConsul(ctx, consulWatcher) + consulWatcher := newConsulWatcher(watchConsulFunc) + grp.Go("consul", consulWatcher.run) istioCertWatcher, err := istioCertSrc.Watch(ctx) if err != nil { return err @@ -216,56 +234,60 @@ func watcherLoop( // information. This is deliberately nil to begin with as we have nothing to send yet. var out chan *SnapshotHolder notifyCh := make(chan *SnapshotHolder) - go func() { + grp.Go("notifyCh", func(ctx context.Context) error { for { select { case sh := <-notifyCh: - if err := sh.Notify(ctx, encoded, consul, snapshotProcessor); err != nil { - panic(err) // TODO: Find a better way of reporting errors from goroutines. + if err := sh.Notify(ctx, encoded, consulWatcher, snapshotProcessor); err != nil { + return err } case <-ctx.Done(): - return + return nil } } - }() + }) - for { - dlog.Debugf(ctx, "WATCHER: --------") - - // XXX Hack: the istioCertWatchManager needs to reset at the start of the - // loop, for now. A better way, I think, will be to instead track deltas in - // ReconcileSecrets -- that way we can ditch this crap and Istio-cert changes - // that somehow don't generate an actual change will still not trigger a - // reconfigure. - istio.StartLoop(ctx) - - select { - case <-k8sWatcher.Changed(): - // Kubernetes has some changes, so we need to handle them. - changed, err := snapshots.K8sUpdate(ctx, k8sWatcher, consul, fastpathProcessor) - if err != nil { - return err - } - if !changed { - continue - } - out = notifyCh - case <-consul.changed(): - dlog.Debugf(ctx, "WATCHER: Consul fired") - snapshots.ConsulUpdate(ctx, consul, fastpathProcessor) - out = notifyCh - case icertUpdate := <-istio.Changed(): - // The Istio cert has some changes, so we need to handle them.
- if _, err := snapshots.IstioUpdate(ctx, istio, icertUpdate); err != nil { - return err + grp.Go("loop", func(ctx context.Context) error { + for { + dlog.Debugf(ctx, "WATCHER: --------") + + // XXX Hack: the istioCertWatchManager needs to reset at the start of the + // loop, for now. A better way, I think, will be to instead track deltas in + // ReconcileSecrets -- that way we can ditch this crap and Istio-cert changes + // that somehow don't generate an actual change will still not trigger a + // reconfigure. + istio.StartLoop(ctx) + + select { + case <-k8sWatcher.Changed(): + // Kubernetes has some changes, so we need to handle them. + changed, err := snapshots.K8sUpdate(ctx, k8sWatcher, consulWatcher, fastpathProcessor) + if err != nil { + return err + } + if !changed { + continue + } + out = notifyCh + case <-consulWatcher.changed(): + dlog.Debugf(ctx, "WATCHER: Consul fired") + snapshots.ConsulUpdate(ctx, consulWatcher, fastpathProcessor) + out = notifyCh + case icertUpdate := <-istio.Changed(): + // The Istio cert has some changes, so we need to handle them. + if _, err := snapshots.IstioUpdate(ctx, istio, icertUpdate); err != nil { + return err + } + out = notifyCh + case out <- snapshots: + out = nil + case <-ctx.Done(): + return nil } - out = notifyCh - case out <- snapshots: - out = nil - case <-ctx.Done(): - return nil } - } + }) + + return grp.Wait() } // SnapshotHolder is responsible for holding @@ -340,7 +362,7 @@ func NewSnapshotHolder(ambassadorMeta *snapshot.AmbassadorMetaInfo) (*SnapshotHo func (sh *SnapshotHolder) K8sUpdate( ctx context.Context, watcher K8sWatcher, - consul *consul, + consulWatcher *consulWatcher, fastpathProcessor FastpathProcessor, ) (bool, error) { dbg := debug.FromContext(ctx) @@ -413,7 +435,7 @@ func (sh *SnapshotHolder) K8sUpdate( return false, err } reconcileConsulTimer.Time(func() { - err = ReconcileConsul(ctx, consul, sh.k8sSnapshot) + err = ReconcileConsul(ctx, consulWatcher, sh.k8sSnapshot) }) if err != nil { return false, err @@ -492,13 +514,13 @@ func (sh *SnapshotHolder) K8sUpdate( return changed, nil } -func (sh *SnapshotHolder) ConsulUpdate(ctx context.Context, consul *consul, fastpathProcessor FastpathProcessor) bool { +func (sh *SnapshotHolder) ConsulUpdate(ctx context.Context, consulWatcher *consulWatcher, fastpathProcessor FastpathProcessor) bool { var endpoints *ambex.Endpoints var dispSnapshot *ecp_v2_cache.Snapshot func() { sh.mutex.Lock() defer sh.mutex.Unlock() - consul.update(sh.consulSnapshot) + consulWatcher.update(sh.consulSnapshot) endpoints = makeEndpoints(ctx, sh.k8sSnapshot, sh.consulSnapshot.Endpoints) _, dispSnapshot = sh.dispatcher.GetSnapshot(ctx) }() @@ -538,7 +560,7 @@ func (sh *SnapshotHolder) IstioUpdate(ctx context.Context, istio *istioCertWatch func (sh *SnapshotHolder) Notify( ctx context.Context, encoded *atomic.Value, - consul *consul, + consulWatcher *consulWatcher, snapshotProcessor SnapshotProcessor, ) error { dbg := debug.FromContext(ctx) @@ -573,7 +595,7 @@ func (sh *SnapshotHolder) Notify( return err } - bootstrapped = consul.isBootstrapped() + bootstrapped = consulWatcher.isBootstrapped() if bootstrapped { sh.unsentDeltas = nil if sh.firstReconfig { diff --git a/docker/base-pip/.gitignore b/docker/base-pip/.gitignore new file mode 100644 index 0000000000..1c7863b295 --- /dev/null +++ b/docker/base-pip/.gitignore @@ -0,0 +1 @@ +/requirements.txt diff --git a/docker/base-pip/Dockerfile b/docker/base-pip/Dockerfile new file mode 100644 index 0000000000..8920f8e44b --- /dev/null +++ 
b/docker/base-pip/Dockerfile @@ -0,0 +1,24 @@ +# syntax = docker/dockerfile:1.3 + +### +# This dockerfile builds the base image for the builder container. See +# the main Dockerfile for more information about what the builder +# container is and how code in this repo is built. +# +# Originally this base was built as part of the builder container's +# bootstrap process. We discovered that minor network interruptions +# would break these steps, and such interruptions were common on our +# cloud CI system. We decided to separate out these steps so that any +# one of them is much less likely to be the cause of a network-related +# failure, i.e. a flake. +# +# See the comment before the build_builder_base() function in builder.sh +# to see when and how often this base image is built and pushed. +## + +ARG from="i-forgot-to-set-build-arg-from" + +FROM ${from} + +COPY requirements.txt . +RUN --mount=type=cache,target=/root/.cache/pip pip3 install -r requirements.txt && rm requirements.txt diff --git a/docker/base-python.docker.gen b/docker/base-python.docker.gen new file mode 100755 index 0000000000..d3a9213861 --- /dev/null +++ b/docker/base-python.docker.gen @@ -0,0 +1,201 @@ +#!/usr/bin/env bash +set -euE + +# Usage: ./docker/base-python.docker.gen > docker/.base-python.docker.stamp +# +# base-python.docker.gen is essentially just a 4 line script: +# +# iidfile=$(mktemp) +# trap 'rm -f "$iidfile"' EXIT +# docker build --iidfile="$iidfile" docker/base-python >&2 +# cat "$iidfile" +# +# However, it has "optimizations" because that `docker build` is +# really slow and painful: +# +# 0. (not a speed improvement itself, but necessary for what +# follows) generate a deterministic Docker tag based on the +# inputs to the image; a sort of content-addressable scheme that +# doesn't rely on having built the image first. +# +# 1. Rather than building the image locally, try to pull it +# pre-built from any of the following Docker repos: +# +# - $BASE_PYTHON_REPO +# - ${DEV_REGISTRY}/base-python +# - docker.io/emissaryingress/base-python +# +# 2. If we do build it locally (because it couldn't be pulled), then +# try pushing it to those Docker repos, so that +# others/our-future-self can benefit from (1). + +OFF='' +BLD='' +RED='' +GRN='' +BLU='' +if tput setaf 0 &>/dev/null; then + OFF="$(tput sgr0)" + BLD="$(tput bold)" + RED="$(tput setaf 1)" + GRN="$(tput setaf 2)" + BLU="$(tput setaf 4)" +fi + +msg() { + # shellcheck disable=SC2059 + printf "${BLU} => [${0##*/}]${OFF} $1${OFF}\n" "${@:2}" >&2 +} + +stat_busy() { + # shellcheck disable=SC2059 + printf "${BLU} => [${0##*/}]${OFF} $1...${OFF}" "${@:2}" >&2 +} + +stat_done() { + # shellcheck disable=SC2059 + printf " ${1:-done}${OFF}\n" >&2 +} + +statnl_busy() { + stat_busy "$@" + printf '\n' >&2 +} + +statnl_done() { + # shellcheck disable=SC2059 + printf "${BLU} => [${0##*/}]${OFF} ...${1:-done}${OFF}\n" >&2 +} + +error() { + # shellcheck disable=SC2059 + printf "${RED} => [${0##*/}] ${BLD}error:${OFF} $1${OFF}\n" "${@:2}" >&2 +} + +# Usage: tag=$(print-tag) +# +# print-tag generates and prints a Docker tag (without the leading +# "REPO:" part) for the image, based on the inputs to the image. +# +# The inputs we care about (i.e. the things that should trigger a +# rebuild) are: +# +# - The `docker/base-python/Dockerfile` file. +# +# - Whatever unpinned remote 3rd-party resources that Dockerfile +# pulls in (mostly the Alpine package repos); but because we don't +# have the whole repos as a file on disk, we fall back to a +# truncated timestamp.
This means that we rebuild periodically to +# make sure we don't fall too far behind and then get surprised +# when a rebuild is required for Dockerfile changes. We have +# defined "enough time" as a few days. See the variable +# "build_every_n_days" below. +print-tag() { + python3 -c ' +import datetime, hashlib + +# Arrange these 2 variables to reduce the likelihood that build_every_n_days +# passes in the middle of a CI workflow; have it happen weekly during the +# weekend. +build_every_n_days = 7 # Periodic rebuild even if Dockerfile does not change +epoch = datetime.datetime(2020, 11, 8, 5, 0) # 1AM EDT on a Sunday + +age = int((datetime.datetime.now() - epoch).days / build_every_n_days) +age_start = epoch + datetime.timedelta(days=age*build_every_n_days) + +dockerfilehash = hashlib.sha256(open("docker/base-python/Dockerfile", "rb").read()).hexdigest() + +print("%sx%s-%s" % (age_start.strftime("%Y%m%d"), build_every_n_days, dockerfilehash[:16])) +' +} + +main() { + local tag + tag=$(print-tag) + + # `repos` is a list of Docker repos where the base-python image + # gets pulled-from/pushed-to. + # + # When pulling, we go down the list until we find a repo we + # can successfully pull from, returning after the first + # success; if we make it through the list without a success, then + # we build the image locally. + # + # When pushing, we attempt to push to *every* repo, but ignore + # failures unless they *all* fail. + local repos=() + + # add_repo REPO appends REPO to ${repos[@]} if ${repos[@]} + # doesn't already contain REPO. + add_repo() { + local needle="$1" + for straw in "${repos[@]}"; do + if [[ "$straw" == "$needle" ]]; then + return + fi + done + repos+=("$needle") + } + if [[ -n "${BASE_PYTHON_REPO:-}" ]]; then + add_repo "$BASE_PYTHON_REPO" + fi + if [[ -n "${DEV_REGISTRY:-}" ]]; then + add_repo "${DEV_REGISTRY}/base-python" + fi + # We always include docker.io/emissaryingress/base-python as a + # fallback, because rebuilding orjson takes so long that we + # really want a cache-hit if at all possible. + add_repo 'docker.io/emissaryingress/base-python' + + # Download + local id='' + for repo in "${repos[@]}"; do + stat_busy 'Checking if %q exists locally' "$repo:$tag" + if docker image inspect "$repo:$tag" &>/dev/null; then + stat_done "${GRN}yes" + id=$(docker image inspect "$repo:$tag" --format='{{.Id}}') + break + fi + stat_done "${RED}no" + + stat_busy 'Checking if %q can be pulled' "$repo:$tag" + if docker pull "$repo:$tag" &>/dev/null; then + stat_done "${GRN}yes" + id=$(docker image inspect "$repo:$tag" --format='{{.Id}}') + break + fi + stat_done "${RED}no" + done + + if [[ -z "$id" ]]; then + # Build + statnl_busy 'Building %q locally' "base-python:$tag" + iidfile=$(mktemp) + trap 'rm -f "$iidfile"' RETURN + docker build --iidfile="$iidfile" docker/base-python >&2 + id=$(cat "$iidfile") + statnl_done 'done building' + + # Push + pushed=0 + for repo in "${repos[@]}"; do + statnl_busy 'Attempting to push %q' "$repo:$tag" + docker tag "$id" "$repo:$tag" >&2 + if docker push "$repo:$tag" >&2; then + statnl_done "${GRN}pushed" + pushed=1 + continue + fi + statnl_done "${RED}failed to push" + done + if ! 
(( pushed )); then + error "Could not push locally-built image to any remote repositories" + return 1 + fi + fi + + printf '%s\n' "$id" +} + +main "$@" diff --git a/builder/Dockerfile.base b/docker/base-python/Dockerfile similarity index 51% rename from builder/Dockerfile.base rename to docker/base-python/Dockerfile index 51e1aedfa8..6f5d29e99f 100644 --- a/builder/Dockerfile.base +++ b/docker/base-python/Dockerfile @@ -14,16 +14,11 @@ # to see when and how often this base image is built and pushed. ## -# This argument controls the base image that is used for our build -# container. -ARG builderbase_stage0="docker.io/frolvlad/alpine-glibc:alpine-3.12_glibc-2.32" -ARG builderbase_stage1="builderbase-stage1" - ######################################## # Third-party code ######################################## -FROM ${builderbase_stage0} as builderbase-stage1 +FROM docker.io/frolvlad/alpine-glibc:alpine-3.15 WORKDIR /buildroot @@ -45,31 +40,24 @@ RUN apk --no-cache add \ libffi-dev \ ncurses \ openssl-dev \ - py3-pip \ - python3=~3.8.10 \ + py3-pip=~20.3.4 \ + python3=~3.9.7 \ python3-dev \ + rust \ + cargo \ + patchelf \ rsync \ sudo \ yaml-dev \ && ln -s /usr/bin/python3 /usr/bin/python \ && chmod u+s $(which docker) -RUN mkdir /tmp/busybox \ - && cd /tmp/busybox \ - && curl -O https://busybox.net/downloads/busybox-1.34.1.tar.bz2 \ - && tar -xjf busybox-1.34.1.tar.bz2 \ - && cd busybox-1.34.1 \ - && make defconfig \ - && make busybox \ - && ls -l /bin \ - && mv busybox /bin/busybox +# Consult +# https://github.com/jazzband/pip-tools/#versions-and-compatibility to +# select a pip-tools version that corresponds to the 'py3-pip' and +# 'python3' versions above. +RUN pip3 install pip-tools==6.3.1 -# We _must_ pin pip to a version before 20.3 because orjson appears to only have -# PEP513 compatible wheels, which are supported before 20.3 but (apparently) -# not in 20.3. We can only upgrade pip to 20.3 after we verify that orjson has -# PEP600 compatible wheels for our linux platform, or we start building orjson -# from source using a rust toolchain. -RUN pip3 install -U pip==20.2.4 pip-tools==5.3.1 RUN curl --fail -L https://dl.google.com/go/go1.17.1.linux-amd64.tar.gz | tar -C /usr/local -xzf - RUN curl --fail -L https://storage.googleapis.com/kubernetes-release/release/v1.22.2/bin/linux/amd64/kubectl -o /usr/bin/kubectl && \ @@ -77,7 +65,7 @@ RUN curl --fail -L https://storage.googleapis.com/kubernetes-release/release/v1. # The YAML parser is... special. To get the C version, we need to install Cython and libyaml, then # build it locally -- just using pip won't work. - +# # Download, build, and install PyYAML. RUN mkdir /tmp/pyyaml && \ cd /tmp/pyyaml && \ @@ -86,23 +74,6 @@ RUN mkdir /tmp/pyyaml && \ cd pyyaml-5.4.1.1 && \ python3 setup.py --with-libyaml install -# Installing `requests` pulls in both `chardet` and `charset_normalizer`. Only one of these is -# needed, so we'll uninstall `chardet` since it's GPL-licensed. -RUN pip3 uninstall -y chardet - -FROM ${builderbase_stage1} as builderbase-stage2 - -# orjson is also special. It relies on glibc, so we need to temporarily convince -# python that our linux is 'manylinux1_compatible' so it will fetch a prebuilt -# binary instead of unsucessfully attempting to build using a rust toolchain. -# -# We can't leave this trick in place for the entire pip install, because other -# packages we depend on (eg: protobuf) have binary packages that do _not_ work -# on our linux. 
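To make the tagging scheme in base-python.docker.gen concrete, here is a hypothetical Go transliteration of the Python embedded in print-tag above; the epoch, the 7-day rebuild bucket, and the 16-hex-character hash prefix come straight from the script, while the use of UTC (the script's datetime.now() is a naive local time) is a simplification.

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"os"
	"time"
)

func printTag() (string, error) {
	const buildEveryNDays = 7
	epoch := time.Date(2020, time.November, 8, 5, 0, 0, 0, time.UTC)

	// Whole N-day periods since the epoch; every run inside the same bucket
	// produces the same date prefix, forcing a rebuild once per bucket even
	// when the Dockerfile is unchanged.
	age := int(time.Since(epoch).Hours() / 24 / buildEveryNDays)
	ageStart := epoch.AddDate(0, 0, age*buildEveryNDays)

	// Hash the Dockerfile so any edit to it changes the tag immediately.
	dockerfile, err := os.ReadFile("docker/base-python/Dockerfile")
	if err != nil {
		return "", err
	}
	hash := sha256.Sum256(dockerfile)

	// hash[:8] in hex is the same 16 characters as hexdigest()[:16].
	return fmt.Sprintf("%sx%d-%x", ageStart.Format("20060102"), buildEveryNDays, hash[:8]), nil
}

func main() {
	tag, err := printTag()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(tag) // e.g. 20201108x7-<16 hex chars>
}
```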
-RUN echo 'manylinux1_compatible = True' > /usr/lib/python3.8/_manylinux.py && \ - pip3 install orjson==3.3.1 && \ - rm -f /usr/lib/python3.8/_manylinux.py - -# Then we can do the rest of the Python stuff. -COPY requirements.txt . -RUN pip3 install -r requirements.txt +# orjson is also special. The wheels on PyPI rely on glibc, so we +# need to use cargo/rustc/patchelf to build a musl-compatible version. +RUN pip3 install orjson==3.6.6 diff --git a/docker/test-stats/Dockerfile b/docker/test-stats/Dockerfile index ed69acbf35..5dc445909a 100644 --- a/docker/test-stats/Dockerfile +++ b/docker/test-stats/Dockerfile @@ -15,7 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License -FROM alpine:3.11 +FROM docker.io/frolvlad/alpine-glibc:alpine-3.15 MAINTAINER Datawire LABEL PROJECT_REPO_URL = "git@github.com:datawire/ambassador.git" \ diff --git a/docs/releaseNotes.yml b/docs/releaseNotes.yml index 77f9756829..f25ba5a18b 100644 --- a/docs/releaseNotes.yml +++ b/docs/releaseNotes.yml @@ -84,6 +84,12 @@ items: - title: 4040 link: https://github.com/emissary-ingress/emissary/pull/4040 + - title: Update to Alpine 3.15 + type: security + body: >- + Emissary has been upgraded from Alpine 3.12 to Alpine 3.15, which incorporates numerous + security patches. + - version: 2.1.2 prevVersion: 2.1.0 date: '2022-01-25' diff --git a/k8s-config/emissary-defaultns-agent/require.yaml b/k8s-config/emissary-defaultns-agent/require.yaml new file mode 100644 index 0000000000..c45fa77998 --- /dev/null +++ b/k8s-config/emissary-defaultns-agent/require.yaml @@ -0,0 +1,15 @@ +_anchors: + _namespace: &namespace default +resources: + - { kind: ServiceAccount, name: emissary-ingress-agent, namespace: *namespace } + - { kind: ClusterRoleBinding, name: emissary-ingress-agent } + - { kind: ClusterRole, name: emissary-ingress-agent } + - { kind: ClusterRole, name: emissary-ingress-agent-pods } + - { kind: ClusterRole, name: emissary-ingress-agent-rollouts } + - { kind: ClusterRole, name: emissary-ingress-agent-applications } + - { kind: ClusterRole, name: emissary-ingress-agent-deployments } + - { kind: ClusterRole, name: emissary-ingress-agent-endpoints } + - { kind: ClusterRole, name: emissary-ingress-agent-configmaps } + - { kind: Role, name: emissary-ingress-agent-config, namespace: *namespace } + - { kind: RoleBinding, name: emissary-ingress-agent-config, namespace: *namespace } + - { kind: Deployment, name: emissary-ingress-agent, namespace: *namespace } diff --git a/k8s-config/emissary-defaultns-agent/values.yaml b/k8s-config/emissary-defaultns-agent/values.yaml new file mode 100644 index 0000000000..dc9a163803 --- /dev/null +++ b/k8s-config/emissary-defaultns-agent/values.yaml @@ -0,0 +1,31 @@ +deploymentTool: getambassador.io +podAnnotations: + consul.hashicorp.com/connect-inject: 'false' + sidecar.istio.io/inject: 'false' +containerNameOverride: ambassador +restartPolicy: Always +terminationGracePeriodSeconds: "0" +service: + type: LoadBalancer +replicaCount: 3 +affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + service: ambassador + topologyKey: kubernetes.io/hostname + weight: 100 + +resources: + limits: + cpu: 1 + memory: 400Mi + requests: + memory: 100Mi +adminService: + type: NodePort +image: + repository: "$imageRepo$" + tag: "$version$" diff --git a/k8s-config/emissary-defaultns-migration/require.yaml b/k8s-config/emissary-defaultns-migration/require.yaml new 
file mode 100644 index 0000000000..7bcedeacca --- /dev/null +++ b/k8s-config/emissary-defaultns-migration/require.yaml @@ -0,0 +1,11 @@ +_anchors: + _namespace: &namespace default +resources: + - { kind: Service, name: emissary-ingress-admin, namespace: *namespace } + - { kind: Service, name: emissary-ingress, namespace: *namespace } + - { kind: ClusterRole, name: emissary-ingress } + - { kind: ServiceAccount, name: emissary-ingress, namespace: *namespace } + - { kind: ClusterRoleBinding, name: emissary-ingress } + - { kind: ClusterRole, name: emissary-ingress-crd } + - { kind: ClusterRole, name: emissary-ingress-watch } + - { kind: Deployment, name: emissary-ingress, namespace: *namespace } diff --git a/k8s-config/emissary-defaultns-migration/values.yaml b/k8s-config/emissary-defaultns-migration/values.yaml new file mode 100644 index 0000000000..dc9a163803 --- /dev/null +++ b/k8s-config/emissary-defaultns-migration/values.yaml @@ -0,0 +1,31 @@ +deploymentTool: getambassador.io +podAnnotations: + consul.hashicorp.com/connect-inject: 'false' + sidecar.istio.io/inject: 'false' +containerNameOverride: ambassador +restartPolicy: Always +terminationGracePeriodSeconds: "0" +service: + type: LoadBalancer +replicaCount: 3 +affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + service: ambassador + topologyKey: kubernetes.io/hostname + weight: 100 + +resources: + limits: + cpu: 1 + memory: 400Mi + requests: + memory: 100Mi +adminService: + type: NodePort +image: + repository: "$imageRepo$" + tag: "$version$" diff --git a/k8s-config/emissary-emissaryns-agent/require.yaml b/k8s-config/emissary-emissaryns-agent/require.yaml new file mode 100644 index 0000000000..7e79b9641f --- /dev/null +++ b/k8s-config/emissary-emissaryns-agent/require.yaml @@ -0,0 +1,15 @@ +_anchors: + _namespace: &namespace emissary +resources: + - { kind: ServiceAccount, name: emissary-ingress-agent, namespace: *namespace } + - { kind: ClusterRoleBinding, name: emissary-ingress-agent } + - { kind: ClusterRole, name: emissary-ingress-agent } + - { kind: ClusterRole, name: emissary-ingress-agent-pods } + - { kind: ClusterRole, name: emissary-ingress-agent-rollouts } + - { kind: ClusterRole, name: emissary-ingress-agent-applications } + - { kind: ClusterRole, name: emissary-ingress-agent-deployments } + - { kind: ClusterRole, name: emissary-ingress-agent-endpoints } + - { kind: ClusterRole, name: emissary-ingress-agent-configmaps } + - { kind: Role, name: emissary-ingress-agent-config, namespace: *namespace } + - { kind: RoleBinding, name: emissary-ingress-agent-config, namespace: *namespace } + - { kind: Deployment, name: emissary-ingress-agent, namespace: *namespace } diff --git a/k8s-config/emissary-emissaryns-agent/values.yaml b/k8s-config/emissary-emissaryns-agent/values.yaml new file mode 100644 index 0000000000..dc9a163803 --- /dev/null +++ b/k8s-config/emissary-emissaryns-agent/values.yaml @@ -0,0 +1,31 @@ +deploymentTool: getambassador.io +podAnnotations: + consul.hashicorp.com/connect-inject: 'false' + sidecar.istio.io/inject: 'false' +containerNameOverride: ambassador +restartPolicy: Always +terminationGracePeriodSeconds: "0" +service: + type: LoadBalancer +replicaCount: 3 +affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + service: ambassador + topologyKey: kubernetes.io/hostname + weight: 100 + +resources: + limits: + cpu: 1 + memory: 400Mi + requests: + 
memory: 100Mi +adminService: + type: NodePort +image: + repository: "$imageRepo$" + tag: "$version$" diff --git a/k8s-config/emissary-emissaryns-migration/require.yaml b/k8s-config/emissary-emissaryns-migration/require.yaml new file mode 100644 index 0000000000..cb15235d45 --- /dev/null +++ b/k8s-config/emissary-emissaryns-migration/require.yaml @@ -0,0 +1,11 @@ +_anchors: + _namespace: &namespace emissary +resources: + - { kind: Service, name: emissary-ingress-admin, namespace: *namespace } + - { kind: Service, name: emissary-ingress, namespace: *namespace } + - { kind: ClusterRole, name: emissary-ingress } + - { kind: ServiceAccount, name: emissary-ingress, namespace: *namespace } + - { kind: ClusterRoleBinding, name: emissary-ingress } + - { kind: ClusterRole, name: emissary-ingress-crd } + - { kind: ClusterRole, name: emissary-ingress-watch } + - { kind: Deployment, name: emissary-ingress, namespace: *namespace } diff --git a/k8s-config/emissary-emissaryns-migration/values.yaml b/k8s-config/emissary-emissaryns-migration/values.yaml new file mode 100644 index 0000000000..dc9a163803 --- /dev/null +++ b/k8s-config/emissary-emissaryns-migration/values.yaml @@ -0,0 +1,31 @@ +deploymentTool: getambassador.io +podAnnotations: + consul.hashicorp.com/connect-inject: 'false' + sidecar.istio.io/inject: 'false' +containerNameOverride: ambassador +restartPolicy: Always +terminationGracePeriodSeconds: "0" +service: + type: LoadBalancer +replicaCount: 3 +affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + service: ambassador + topologyKey: kubernetes.io/hostname + weight: 100 + +resources: + limits: + cpu: 1 + memory: 400Mi + requests: + memory: 100Mi +adminService: + type: NodePort +image: + repository: "$imageRepo$" + tag: "$version$" diff --git a/manifests/emissary/.gitignore b/manifests/emissary/.gitignore index 89dfb62e0f..8081d75c85 100644 --- a/manifests/emissary/.gitignore +++ b/manifests/emissary/.gitignore @@ -2,3 +2,7 @@ /emissary-crds.yaml /emissary-defaultns.yaml /emissary-emissaryns.yaml +/emissary-defaultns-agent.yaml +/emissary-emissaryns-agent.yaml +/emissary-defaultns-migration.yaml +/emissary-emissaryns-migration.yaml diff --git a/manifests/emissary/emissary-crds.yaml.in b/manifests/emissary/emissary-crds.yaml.in index 17f892dd09..cd4105b0e4 100644 --- a/manifests/emissary/emissary-crds.yaml.in +++ b/manifests/emissary/emissary-crds.yaml.in @@ -288,6 +288,14 @@ spec: type: object served: true storage: false + - name: v1 + schema: + openAPIV3Schema: + description: AuthService is the Schema for the authservices API + type: object + x-kubernetes-preserve-unknown-fields: true + served: false + storage: false --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -391,6 +399,14 @@ spec: type: object served: true storage: false + - name: v1 + schema: + openAPIV3Schema: + description: ConsulResolver is the Schema for the ConsulResolver API + type: object + x-kubernetes-preserve-unknown-fields: true + served: false + storage: false --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -658,6 +674,14 @@ spec: type: object served: true storage: false + - name: v1 + schema: + openAPIV3Schema: + description: DevPortal is the Schema for the DevPortals API + type: object + x-kubernetes-preserve-unknown-fields: true + served: false + storage: false --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -1298,6 +1322,14 @@ spec: storage: false subresources: 
status: {} + - name: v1 + schema: + openAPIV3Schema: + description: Host is the Schema for the hosts API + type: object + x-kubernetes-preserve-unknown-fields: true + served: false + storage: false --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -1394,6 +1426,14 @@ spec: type: object served: true storage: false + - name: v1 + schema: + openAPIV3Schema: + description: KubernetesEndpointResolver is the Schema for the kubernetesendpointresolver API + type: object + x-kubernetes-preserve-unknown-fields: true + served: false + storage: false --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -1490,6 +1530,14 @@ spec: type: object served: true storage: false + - name: v1 + schema: + openAPIV3Schema: + description: KubernetesServiceResolver is the Schema for the kubernetesserviceresolver API + type: object + x-kubernetes-preserve-unknown-fields: true + served: false + storage: false --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -1840,6 +1888,14 @@ spec: type: object served: true storage: false + - name: v1 + schema: + openAPIV3Schema: + description: LogService is the Schema for the logservices API + type: object + x-kubernetes-preserve-unknown-fields: true + served: false + storage: false --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -2848,6 +2904,14 @@ spec: storage: false subresources: status: {} + - name: v1 + schema: + openAPIV3Schema: + description: Mapping is the Schema for the mappings API + type: object + x-kubernetes-preserve-unknown-fields: true + served: false + storage: false --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -2955,6 +3019,14 @@ spec: type: object served: true storage: false + - name: v1 + schema: + openAPIV3Schema: + description: A Module defines system-wide configuration. The type of module is controlled by the .metadata.name; valid names are "ambassador" or "tls". 
+ type: object + x-kubernetes-preserve-unknown-fields: true + served: false + storage: false --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -3120,6 +3192,14 @@ spec: type: object served: true storage: false + - name: v1 + schema: + openAPIV3Schema: + description: RateLimitService is the Schema for the ratelimitservices API + type: object + x-kubernetes-preserve-unknown-fields: true + served: false + storage: false --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -3347,6 +3427,14 @@ spec: type: object served: true storage: false + - name: v1 + schema: + openAPIV3Schema: + description: TCPMapping is the Schema for the tcpmappings API + type: object + x-kubernetes-preserve-unknown-fields: true + served: false + storage: false --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -3530,6 +3618,14 @@ spec: type: object served: true storage: false + - name: v1 + schema: + openAPIV3Schema: + description: TLSContext is the Schema for the tlscontexts API + type: object + x-kubernetes-preserve-unknown-fields: true + served: false + storage: false --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition @@ -3719,6 +3815,14 @@ spec: type: object served: true storage: false + - name: v1 + schema: + openAPIV3Schema: + description: TracingService is the Schema for the tracingservices API + type: object + x-kubernetes-preserve-unknown-fields: true + served: false + storage: false --- ################################################################################ # Namespace # diff --git a/manifests/emissary/emissary-defaultns-agent.yaml.in b/manifests/emissary/emissary-defaultns-agent.yaml.in new file mode 100644 index 0000000000..3749bf8614 --- /dev/null +++ b/manifests/emissary/emissary-defaultns-agent.yaml.in @@ -0,0 +1,262 @@ +# GENERATED FILE: edits made by hand will not be preserved. 
+--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress-agent + app.kubernetes.io/part-of: emissary-ingress + product: aes + name: emissary-ingress-agent + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress-agent + app.kubernetes.io/part-of: emissary-ingress + product: aes + name: emissary-ingress-agent +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: emissary-ingress-agent +subjects: +- kind: ServiceAccount + name: emissary-ingress-agent + namespace: default +--- +aggregationRule: + clusterRoleSelectors: + - matchLabels: + rbac.getambassador.io/role-group: emissary-ingress-agent +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress-agent + app.kubernetes.io/part-of: emissary-ingress + product: aes + name: emissary-ingress-agent +rules: [] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress-agent + app.kubernetes.io/part-of: emissary-ingress + product: aes + rbac.getambassador.io/role-group: emissary-ingress-agent + name: emissary-ingress-agent-pods +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress-agent + app.kubernetes.io/part-of: emissary-ingress + product: aes + rbac.getambassador.io/role-group: emissary-ingress-agent + name: emissary-ingress-agent-rollouts +rules: +- apiGroups: + - argoproj.io + resources: + - rollouts + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress-agent + app.kubernetes.io/part-of: emissary-ingress + product: aes + rbac.getambassador.io/role-group: emissary-ingress-agent + name: emissary-ingress-agent-applications +rules: +- apiGroups: + - argoproj.io + resources: + - applications + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress-agent + app.kubernetes.io/part-of: emissary-ingress + product: aes + rbac.getambassador.io/role-group: emissary-ingress-agent + name: emissary-ingress-agent-deployments +rules: +- apiGroups: + - apps + - extensions + resources: + - deployments + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress-agent + app.kubernetes.io/part-of: emissary-ingress + product: 
aes + rbac.getambassador.io/role-group: emissary-ingress-agent + name: emissary-ingress-agent-endpoints +rules: +- apiGroups: + - "" + resources: + - endpoints + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress-agent + app.kubernetes.io/part-of: emissary-ingress + product: aes + rbac.getambassador.io/role-group: emissary-ingress-agent + name: emissary-ingress-agent-configmaps +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress-agent + app.kubernetes.io/part-of: emissary-ingress + product: aes + name: emissary-ingress-agent-config + namespace: default +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress-agent + app.kubernetes.io/part-of: emissary-ingress + product: aes + name: emissary-ingress-agent-config + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: emissary-ingress-agent-config +subjects: +- kind: ServiceAccount + name: emissary-ingress-agent + namespace: default +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress-agent + app.kubernetes.io/part-of: emissary-ingress + product: aes + name: emissary-ingress-agent + namespace: default +spec: + progressDeadlineSeconds: 600 + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/name: emissary-ingress-agent + template: + metadata: + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress-agent + app.kubernetes.io/part-of: emissary-ingress + product: aes + spec: + containers: + - command: + - agent + env: + - name: AGENT_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: AGENT_CONFIG_RESOURCE_NAME + value: emissary-ingress-agent-cloud-token + - name: RPC_CONNECTION_ADDRESS + value: https://app.getambassador.io/ + - name: AES_SNAPSHOT_URL + value: http://emissary-ingress-admin.default:8005/snapshot-external + image: $imageRepo$:$version$ + imagePullPolicy: IfNotPresent + name: agent + serviceAccountName: emissary-ingress-agent diff --git a/manifests/emissary/emissary-defaultns-migration.yaml.in b/manifests/emissary/emissary-defaultns-migration.yaml.in new file mode 100644 index 0000000000..453795cd1d --- /dev/null +++ b/manifests/emissary/emissary-defaultns-migration.yaml.in @@ -0,0 +1,330 @@ +# GENERATED FILE: edits made by hand will not be preserved. +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + a8r.io/bugs: https://github.com/datawire/ambassador/issues + a8r.io/chat: http://a8r.io/Slack + a8r.io/dependencies: None + a8r.io/description: The Ambassador Edge Stack admin service for internal use and health checks. 
+ a8r.io/documentation: https://www.getambassador.io/docs/edge-stack/latest/ + a8r.io/owner: Ambassador Labs + a8r.io/repository: github.com/datawire/ambassador + a8r.io/support: https://www.getambassador.io/about-us/support/ + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress + app.kubernetes.io/part-of: emissary-ingress + product: aes + service: ambassador-admin + name: emissary-ingress-admin + namespace: default +spec: + ports: + - name: ambassador-admin + port: 8877 + protocol: TCP + targetPort: admin + - name: ambassador-snapshot + port: 8005 + protocol: TCP + targetPort: 8005 + selector: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/name: emissary-ingress + type: NodePort +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + a8r.io/bugs: https://github.com/datawire/ambassador/issues + a8r.io/chat: http://a8r.io/Slack + a8r.io/dependencies: emissary-ingress-redis.default + a8r.io/description: The Ambassador Edge Stack goes beyond traditional API Gateways and Ingress Controllers with the advanced edge features needed to support developer self-service and full-cycle development. + a8r.io/documentation: https://www.getambassador.io/docs/edge-stack/latest/ + a8r.io/owner: Ambassador Labs + a8r.io/repository: github.com/datawire/ambassador + a8r.io/support: https://www.getambassador.io/about-us/support/ + labels: + app.kubernetes.io/component: ambassador-service + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress + app.kubernetes.io/part-of: emissary-ingress + product: aes + name: emissary-ingress + namespace: default +spec: + ports: + - name: http + port: 80 + targetPort: 8080 + - name: https + port: 443 + targetPort: 8443 + selector: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/name: emissary-ingress + profile: main + type: LoadBalancer +--- +aggregationRule: + clusterRoleSelectors: + - matchLabels: + rbac.getambassador.io/role-group: emissary-ingress +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress + app.kubernetes.io/part-of: emissary-ingress + product: aes + name: emissary-ingress +rules: [] +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress + app.kubernetes.io/part-of: emissary-ingress + product: aes + name: emissary-ingress + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress + app.kubernetes.io/part-of: emissary-ingress + product: aes + name: emissary-ingress +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: emissary-ingress +subjects: +- kind: ServiceAccount + name: emissary-ingress + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress + app.kubernetes.io/part-of: emissary-ingress + product: aes 
+ rbac.getambassador.io/role-group: emissary-ingress + name: emissary-ingress-crd +rules: +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + - watch + - delete +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress + app.kubernetes.io/part-of: emissary-ingress + product: aes + rbac.getambassador.io/role-group: emissary-ingress + name: emissary-ingress-watch +rules: +- apiGroups: + - "" + resources: + - namespaces + - services + - secrets + - configmaps + - endpoints + verbs: + - get + - list + - watch +- apiGroups: + - getambassador.io + resources: + - '*' + verbs: + - get + - list + - watch + - update + - patch + - create + - delete +- apiGroups: + - getambassador.io + resources: + - mappings/status + verbs: + - update +- apiGroups: + - networking.internal.knative.dev + resources: + - clusteringresses + - ingresses + verbs: + - get + - list + - watch +- apiGroups: + - networking.x-k8s.io + resources: + - '*' + verbs: + - get + - list + - watch +- apiGroups: + - networking.internal.knative.dev + resources: + - ingresses/status + - clusteringresses/status + verbs: + - update +- apiGroups: + - extensions + - networking.k8s.io + resources: + - ingresses + - ingressclasses + verbs: + - get + - list + - watch +- apiGroups: + - extensions + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress + app.kubernetes.io/part-of: emissary-ingress + product: aes + name: emissary-ingress + namespace: default +spec: + progressDeadlineSeconds: 600 + replicas: 3 + selector: + matchLabels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/name: emissary-ingress + strategy: + type: RollingUpdate + template: + metadata: + annotations: + consul.hashicorp.com/connect-inject: "false" + sidecar.istio.io/inject: "false" + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress + app.kubernetes.io/part-of: emissary-ingress + product: aes + profile: main + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + service: ambassador + topologyKey: kubernetes.io/hostname + weight: 100 + containers: + - env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: AMBASSADOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: $imageRepo$:$version$ + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + httpGet: + path: /ambassador/v0/check_alive + port: admin + initialDelaySeconds: 30 + periodSeconds: 3 + name: ambassador + ports: + - containerPort: 8080 + name: http + - containerPort: 8443 + name: https + - containerPort: 8877 + name: admin + readinessProbe: + failureThreshold: 3 + httpGet: + path: /ambassador/v0/check_ready + port: admin + initialDelaySeconds: 30 + periodSeconds: 3 + resources: + limits: + cpu: 1 + memory: 400Mi + requests: + cpu: 200m + memory: 100Mi + securityContext: + allowPrivilegeEscalation: false + volumeMounts: + - mountPath: /tmp/ambassador-pod-info + name: ambassador-pod-info + readOnly: true + 
dnsPolicy: ClusterFirst + hostNetwork: false + imagePullSecrets: [] + restartPolicy: Always + securityContext: + runAsUser: 8888 + serviceAccountName: emissary-ingress + terminationGracePeriodSeconds: 0 + volumes: + - downwardAPI: + items: + - fieldRef: + fieldPath: metadata.labels + path: labels + name: ambassador-pod-info diff --git a/manifests/emissary/emissary-emissaryns-agent.yaml.in b/manifests/emissary/emissary-emissaryns-agent.yaml.in new file mode 100644 index 0000000000..860ddbb28a --- /dev/null +++ b/manifests/emissary/emissary-emissaryns-agent.yaml.in @@ -0,0 +1,262 @@ +# GENERATED FILE: edits made by hand will not be preserved. +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress-agent + app.kubernetes.io/part-of: emissary-ingress + product: aes + name: emissary-ingress-agent + namespace: emissary +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress-agent + app.kubernetes.io/part-of: emissary-ingress + product: aes + name: emissary-ingress-agent +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: emissary-ingress-agent +subjects: +- kind: ServiceAccount + name: emissary-ingress-agent + namespace: emissary +--- +aggregationRule: + clusterRoleSelectors: + - matchLabels: + rbac.getambassador.io/role-group: emissary-ingress-agent +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress-agent + app.kubernetes.io/part-of: emissary-ingress + product: aes + name: emissary-ingress-agent +rules: [] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress-agent + app.kubernetes.io/part-of: emissary-ingress + product: aes + rbac.getambassador.io/role-group: emissary-ingress-agent + name: emissary-ingress-agent-pods +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress-agent + app.kubernetes.io/part-of: emissary-ingress + product: aes + rbac.getambassador.io/role-group: emissary-ingress-agent + name: emissary-ingress-agent-rollouts +rules: +- apiGroups: + - argoproj.io + resources: + - rollouts + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress-agent + app.kubernetes.io/part-of: emissary-ingress + product: aes + rbac.getambassador.io/role-group: emissary-ingress-agent + name: emissary-ingress-agent-applications +rules: +- apiGroups: + - argoproj.io + resources: + - applications + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: emissary-ingress + 
app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress-agent + app.kubernetes.io/part-of: emissary-ingress + product: aes + rbac.getambassador.io/role-group: emissary-ingress-agent + name: emissary-ingress-agent-deployments +rules: +- apiGroups: + - apps + - extensions + resources: + - deployments + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress-agent + app.kubernetes.io/part-of: emissary-ingress + product: aes + rbac.getambassador.io/role-group: emissary-ingress-agent + name: emissary-ingress-agent-endpoints +rules: +- apiGroups: + - "" + resources: + - endpoints + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress-agent + app.kubernetes.io/part-of: emissary-ingress + product: aes + rbac.getambassador.io/role-group: emissary-ingress-agent + name: emissary-ingress-agent-configmaps +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress-agent + app.kubernetes.io/part-of: emissary-ingress + product: aes + name: emissary-ingress-agent-config + namespace: emissary +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress-agent + app.kubernetes.io/part-of: emissary-ingress + product: aes + name: emissary-ingress-agent-config + namespace: emissary +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: emissary-ingress-agent-config +subjects: +- kind: ServiceAccount + name: emissary-ingress-agent + namespace: emissary +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress-agent + app.kubernetes.io/part-of: emissary-ingress + product: aes + name: emissary-ingress-agent + namespace: emissary +spec: + progressDeadlineSeconds: 600 + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/name: emissary-ingress-agent + template: + metadata: + labels: + app.kubernetes.io/instance: emissary-ingress + app.kubernetes.io/managed-by: getambassador.io + app.kubernetes.io/name: emissary-ingress-agent + app.kubernetes.io/part-of: emissary-ingress + product: aes + spec: + containers: + - command: + - agent + env: + - name: AGENT_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: AGENT_CONFIG_RESOURCE_NAME + value: emissary-ingress-agent-cloud-token + - name: RPC_CONNECTION_ADDRESS + value: https://app.getambassador.io/ + - name: AES_SNAPSHOT_URL + value: http://emissary-ingress-admin.emissary:8005/snapshot-external + image: $imageRepo$:$version$ + imagePullPolicy: IfNotPresent + name: agent + serviceAccountName: 
diff --git a/manifests/emissary/emissary-emissaryns-migration.yaml.in b/manifests/emissary/emissary-emissaryns-migration.yaml.in
new file mode 100644
index 0000000000..7737a4e217
--- /dev/null
+++ b/manifests/emissary/emissary-emissaryns-migration.yaml.in
@@ -0,0 +1,330 @@
+# GENERATED FILE: edits made by hand will not be preserved.
+---
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    a8r.io/bugs: https://github.com/datawire/ambassador/issues
+    a8r.io/chat: http://a8r.io/Slack
+    a8r.io/dependencies: None
+    a8r.io/description: The Ambassador Edge Stack admin service for internal use and health checks.
+    a8r.io/documentation: https://www.getambassador.io/docs/edge-stack/latest/
+    a8r.io/owner: Ambassador Labs
+    a8r.io/repository: github.com/datawire/ambassador
+    a8r.io/support: https://www.getambassador.io/about-us/support/
+  labels:
+    app.kubernetes.io/instance: emissary-ingress
+    app.kubernetes.io/managed-by: getambassador.io
+    app.kubernetes.io/name: emissary-ingress
+    app.kubernetes.io/part-of: emissary-ingress
+    product: aes
+    service: ambassador-admin
+  name: emissary-ingress-admin
+  namespace: emissary
+spec:
+  ports:
+  - name: ambassador-admin
+    port: 8877
+    protocol: TCP
+    targetPort: admin
+  - name: ambassador-snapshot
+    port: 8005
+    protocol: TCP
+    targetPort: 8005
+  selector:
+    app.kubernetes.io/instance: emissary-ingress
+    app.kubernetes.io/name: emissary-ingress
+  type: NodePort
+---
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    a8r.io/bugs: https://github.com/datawire/ambassador/issues
+    a8r.io/chat: http://a8r.io/Slack
+    a8r.io/dependencies: emissary-ingress-redis.emissary
+    a8r.io/description: The Ambassador Edge Stack goes beyond traditional API Gateways and Ingress Controllers with the advanced edge features needed to support developer self-service and full-cycle development.
+    a8r.io/documentation: https://www.getambassador.io/docs/edge-stack/latest/
+    a8r.io/owner: Ambassador Labs
+    a8r.io/repository: github.com/datawire/ambassador
+    a8r.io/support: https://www.getambassador.io/about-us/support/
+  labels:
+    app.kubernetes.io/component: ambassador-service
+    app.kubernetes.io/instance: emissary-ingress
+    app.kubernetes.io/managed-by: getambassador.io
+    app.kubernetes.io/name: emissary-ingress
+    app.kubernetes.io/part-of: emissary-ingress
+    product: aes
+  name: emissary-ingress
+  namespace: emissary
+spec:
+  ports:
+  - name: http
+    port: 80
+    targetPort: 8080
+  - name: https
+    port: 443
+    targetPort: 8443
+  selector:
+    app.kubernetes.io/instance: emissary-ingress
+    app.kubernetes.io/name: emissary-ingress
+    profile: main
+  type: LoadBalancer
+---
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.getambassador.io/role-group: emissary-ingress
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    app.kubernetes.io/instance: emissary-ingress
+    app.kubernetes.io/managed-by: getambassador.io
+    app.kubernetes.io/name: emissary-ingress
+    app.kubernetes.io/part-of: emissary-ingress
+    product: aes
+  name: emissary-ingress
+rules: []
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    app.kubernetes.io/instance: emissary-ingress
+    app.kubernetes.io/managed-by: getambassador.io
+    app.kubernetes.io/name: emissary-ingress
+    app.kubernetes.io/part-of: emissary-ingress
+    product: aes
+  name: emissary-ingress
+  namespace: emissary
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  labels:
+    app.kubernetes.io/instance: emissary-ingress
+    app.kubernetes.io/managed-by: getambassador.io
+    app.kubernetes.io/name: emissary-ingress
+    app.kubernetes.io/part-of: emissary-ingress
+    product: aes
+  name: emissary-ingress
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: emissary-ingress
+subjects:
+- kind: ServiceAccount
+  name: emissary-ingress
+  namespace: emissary
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    app.kubernetes.io/instance: emissary-ingress
+    app.kubernetes.io/managed-by: getambassador.io
+    app.kubernetes.io/name: emissary-ingress
+    app.kubernetes.io/part-of: emissary-ingress
+    product: aes
+    rbac.getambassador.io/role-group: emissary-ingress
+  name: emissary-ingress-crd
+rules:
+- apiGroups:
+  - apiextensions.k8s.io
+  resources:
+  - customresourcedefinitions
+  verbs:
+  - get
+  - list
+  - watch
+  - delete
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    app.kubernetes.io/instance: emissary-ingress
+    app.kubernetes.io/managed-by: getambassador.io
+    app.kubernetes.io/name: emissary-ingress
+    app.kubernetes.io/part-of: emissary-ingress
+    product: aes
+    rbac.getambassador.io/role-group: emissary-ingress
+  name: emissary-ingress-watch
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - namespaces
+  - services
+  - secrets
+  - configmaps
+  - endpoints
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - getambassador.io
+  resources:
+  - '*'
+  verbs:
+  - get
+  - list
+  - watch
+  - update
+  - patch
+  - create
+  - delete
+- apiGroups:
+  - getambassador.io
+  resources:
+  - mappings/status
+  verbs:
+  - update
+- apiGroups:
+  - networking.internal.knative.dev
+  resources:
+  - clusteringresses
+  - ingresses
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - networking.x-k8s.io
+  resources:
+  - '*'
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - networking.internal.knative.dev
+  resources:
+  - ingresses/status
+  - clusteringresses/status
+  verbs:
+  - update
+- apiGroups:
+  - extensions
+  - networking.k8s.io
+  resources:
+  - ingresses
+  - ingressclasses
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - extensions
+  - networking.k8s.io
+  resources:
+  - ingresses/status
+  verbs:
+  - update
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app.kubernetes.io/instance: emissary-ingress
+    app.kubernetes.io/managed-by: getambassador.io
+    app.kubernetes.io/name: emissary-ingress
+    app.kubernetes.io/part-of: emissary-ingress
+    product: aes
+  name: emissary-ingress
+  namespace: emissary
+spec:
+  progressDeadlineSeconds: 600
+  replicas: 3
+  selector:
+    matchLabels:
+      app.kubernetes.io/instance: emissary-ingress
+      app.kubernetes.io/name: emissary-ingress
+  strategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      annotations:
+        consul.hashicorp.com/connect-inject: "false"
+        sidecar.istio.io/inject: "false"
+      labels:
+        app.kubernetes.io/instance: emissary-ingress
+        app.kubernetes.io/managed-by: getambassador.io
+        app.kubernetes.io/name: emissary-ingress
+        app.kubernetes.io/part-of: emissary-ingress
+        product: aes
+        profile: main
+    spec:
+      affinity:
+        podAntiAffinity:
+          preferredDuringSchedulingIgnoredDuringExecution:
+          - podAffinityTerm:
+              labelSelector:
+                matchLabels:
+                  service: ambassador
+              topologyKey: kubernetes.io/hostname
+            weight: 100
+      containers:
+      - env:
+        - name: HOST_IP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.hostIP
+        - name: AMBASSADOR_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        image: $imageRepo$:$version$
+        imagePullPolicy: IfNotPresent
+        livenessProbe:
+          failureThreshold: 3
+          httpGet:
+            path: /ambassador/v0/check_alive
+            port: admin
+          initialDelaySeconds: 30
+          periodSeconds: 3
+        name: ambassador
+        ports:
+        - containerPort: 8080
+          name: http
+        - containerPort: 8443
+          name: https
+        - containerPort: 8877
+          name: admin
+        readinessProbe:
+          failureThreshold: 3
+          httpGet:
+            path: /ambassador/v0/check_ready
+            port: admin
+          initialDelaySeconds: 30
+          periodSeconds: 3
+        resources:
+          limits:
+            cpu: 1
+            memory: 400Mi
+          requests:
+            cpu: 200m
+            memory: 100Mi
+        securityContext:
+          allowPrivilegeEscalation: false
+        volumeMounts:
+        - mountPath: /tmp/ambassador-pod-info
+          name: ambassador-pod-info
+          readOnly: true
+      dnsPolicy: ClusterFirst
+      hostNetwork: false
+      imagePullSecrets: []
+      restartPolicy: Always
+      securityContext:
+        runAsUser: 8888
+      serviceAccountName: emissary-ingress
+      terminationGracePeriodSeconds: 0
+      volumes:
+      - downwardAPI:
+          items:
+          - fieldRef:
+              fieldPath: metadata.labels
+            path: labels
+        name: ambassador-pod-info
diff --git a/tools/src/fix-crds/business.go b/tools/src/fix-crds/business.go
index 8af9da12cd..4bc8707e6e 100644
--- a/tools/src/fix-crds/business.go
+++ b/tools/src/fix-crds/business.go
@@ -50,6 +50,27 @@ type CRD struct {
 }
 
 func FixCRD(args Args, crd *CRD) error {
+	// Really, Golang? You couldn't just have a "set" type?
+	// TODO(Flynn): Look into having kubebuilder generate our unserved v1.
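+	// CRDsWithNoUnservedV1 (just below) lists the CRDs whose version list is
+	// left alone. When generating for TargetAPIServerKubectl, every other CRD
+	// gets a placeholder version appended later in this function, one that
+	// serializes as:
+	//
+	//   - name: v1
+	//     served: false
+	//     storage: false
+	//     schema:
+	//       openAPIV3Schema:
+	//         description: <first line of the v2 description>
+	//         type: object
+	//         x-kubernetes-preserve-unknown-fields: true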
+	CRDsWithNoUnservedV1 := map[string]struct{}{
+		"filterpolicies.getambassador.io": {},
+		"filters.getambassador.io":        {},
+		"ratelimits.getambassador.io":     {},
+	}
+
 	// sanity check
 	if crd.Kind != "CustomResourceDefinition" || !strings.HasPrefix(crd.APIVersion, "apiextensions.k8s.io/") {
 		return fmt.Errorf("not a CRD: %#v", crd)
@@ -120,6 +141,58 @@ func FixCRD(args Args, crd *CRD) error {
 				ConversionReviewVersions: []string{"v1beta1"},
 			},
 		}
+
+	// If we're generating stuff for the APIServer, make sure we have an unserved,
+	// unstored v1 version, unless it's one of the CRDs for which we shouldn't do that.
+	//
+	// TODO(Flynn): Look into having kubebuilder generate our unserved v1.
+	if args.Target == TargetAPIServerKubectl {
+		_, skip := CRDsWithNoUnservedV1[crd.Metadata.Name]
+
+		if !skip {
+			var v2desc string
+			versions := make([]apiext.CustomResourceDefinitionVersion, 0, len(crd.Spec.Versions))
+
+			for _, version := range crd.Spec.Versions {
+				if version.Name != "v1" {
+					versions = append(versions, version)
+				}
+
+				if version.Name == "v2" {
+					v2desc = version.Schema.OpenAPIV3Schema.Description
+
+					// I don't want multiline descriptions in the unserved v1, so let's see
+					// if we have a newline in v2desc.
+					idx := strings.Index(v2desc, "\n")
+
+					if idx >= 0 {
+						// Yup. Toss it and everything after it.
+						v2desc = v2desc[:idx]
+					}
+
+					// Finally, strip whitespace whether we found a newline or not.
+					v2desc = strings.TrimSpace(v2desc)
+				}
+			}
+
+			preserveUnknownFields := true
+
+			versions = append(versions, apiext.CustomResourceDefinitionVersion{
+				Name: "v1",
+				Schema: &apiext.CustomResourceValidation{
+					OpenAPIV3Schema: &apiext.JSONSchemaProps{
+						Description:            v2desc,
+						Type:                   "object",
+						XPreserveUnknownFields: &preserveUnknownFields,
+					},
+				},
+				Served:  false,
+				Storage: false,
+			})
+
+			crd.Spec.Versions = versions
+		}
+	}
 	}
 
 	return nil
diff --git a/tools/src/py-mkopensource/main.go b/tools/src/py-mkopensource/main.go
index b6d322a3d2..459a6dede5 100644
--- a/tools/src/py-mkopensource/main.go
+++ b/tools/src/py-mkopensource/main.go
@@ -34,12 +34,14 @@ func parseLicenses(name, version, license string) map[License]struct{} {
 	// that a human has to go make sure that the license didn't
 	// change when upgrading.
{"CacheControl", "0.12.6", "UNKNOWN"}: {Apache2}, + {"CacheControl", "0.12.10", "UNKNOWN"}: {Apache2}, {"Click", "7.0", "BSD"}: {BSD3}, {"Flask", "1.0.2", "BSD"}: {BSD3}, {"GitPython", "3.1.11", "UNKNOWN"}: {BSD3}, {"Jinja2", "2.10.1", "BSD"}: {BSD3}, {"chardet", "3.0.4", "LGPL"}: {LGPL21}, {"colorama", "0.4.3", "BSD"}: {BSD3}, + {"colorama", "0.4.4", "BSD"}: {BSD3}, {"decorator", "4.4.2", "new BSD License"}: {BSD2}, {"gitdb", "4.0.5", "BSD License"}: {BSD3}, {"idna", "2.7", "BSD-like"}: {BSD3, PSF, Unicode2015}, @@ -51,6 +53,7 @@ func parseLicenses(name, version, license string) map[License]struct{} { {"jsonschema", "3.2.0", "UNKNOWN"}: {MIT}, {"lockfile", "0.12.2", "UNKNOWN"}: {MIT}, {"oauthlib", "3.1.0", "BSD"}: {BSD3}, + {"pep517", "0.12.0", "UNKNOWN"}: {MIT}, {"pep517", "0.8.2", "UNKNOWN"}: {MIT}, {"pip-tools", "5.3.1", "BSD"}: {BSD3}, {"ptyprocess", "0.6.0", "UNKNOWN"}: {ISC}, @@ -60,6 +63,7 @@ func parseLicenses(name, version, license string) map[License]struct{} { {"python-json-logger", "2.0.2", "BSD"}: {BSD2}, {"semantic-version", "2.8.5", "BSD"}: {BSD2}, {"smmap", "3.0.4", "BSD"}: {BSD3}, + {"tomli", "1.2.2", "UNKNOWN"}: {MIT}, {"webencodings", "0.5.1", "BSD"}: {BSD3}, {"websocket-client", "0.57.0", "BSD"}: {BSD3}, {"zipp", "3.6.0", "UNKNOWN"}: {MIT}, @@ -68,7 +72,9 @@ func parseLicenses(name, version, license string) map[License]struct{} { // it's easier to just hard-code it. {"docutils", "0.17.1", "public domain, Python, 2-Clause BSD, GPL 3 (see COPYING.txt)"}: {PublicDomain, PSF, BSD2, GPL3}, {"orjson", "3.3.1", "Apache-2.0 OR MIT"}: {Apache2, MIT}, + {"orjson", "3.6.6", "Apache-2.0 OR MIT"}: {Apache2, MIT}, {"packaging", "20.4", "BSD-2-Clause or Apache-2.0"}: {BSD2, Apache2}, + {"packaging", "20.9", "BSD-2-Clause or Apache-2.0"}: {BSD2, Apache2}, }[tuple{name, version, license}] if ok { ret := make(map[License]struct{}, len(override)) @@ -91,9 +97,11 @@ func parseLicenses(name, version, license string) map[License]struct{} { "Apache Software License": {Apache2}, "Apache Software License 2.0": {Apache2}, + "BSD-2-Clause": {BSD2}, + "3-Clause BSD License": {BSD3}, - "BSD-2-Clause": {BSD2}, "BSD-3-Clause": {BSD3}, + "BSD 3 Clause": {BSD3}, "ISC license": {ISC}, "ISC": {ISC},