diff --git a/.travis.yml b/.travis.yml index bad752e3ce..cfb21c685c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -22,59 +22,56 @@ notifications: # New secure variables can be added using travis encrypt -r kubernetes/ingress-nginx --add K=V env: global: - - DOCKER=docker - - SKIP_SNAPSHOT=true - - NODE_IP=10.192.0.3 - - E2E_NODES=6 - - GH_REF=github.com/kubernetes/ingress-nginx - + - DOCKER=docker + - SKIP_SNAPSHOT=true + - GH_REF=github.com/kubernetes/ingress-nginx branches: only: - - master - + master jobs: include: - - stage: Static Check - script: - - make static-check - - stage: Lua Unit Test - script: - - make lua-test - - stage: Coverage - script: - - make cover - - stage: e2e - if: (branch = master AND env(COMPONENT) != "docs") OR (type = pull_request AND commit_message !~ /(skip-e2e)/) - before_script: - - test/e2e/up.sh - script: - - make e2e-test - # split builds to avoid job timeouts - - stage: publish amd64 - if: type = push AND branch = master AND repo = Shopify/ingress AND env(COMPONENT) = "ingress-controller" - script: - - .travis/publish.sh amd64 - - stage: publish arm - if: type = api AND branch = master AND repo = kubernetes/ingress-nginx AND env(COMPONENT) = "ingress-controller" - script: - - make register-qemu - - .travis/publish.sh arm - - stage: publish arm64 - if: type = api AND branch = master AND repo = kubernetes/ingress-nginx AND env(COMPONENT) = "ingress-controller" - script: - - make register-qemu - - .travis/publish.sh arm64 - - stage: publish ppc64le - if: type = api AND branch = master AND repo = kubernetes/ingress-nginx AND env(COMPONENT) = "ingress-controller" - script: - - make register-qemu - - .travis/publish.sh ppc64le - - stage: publish s390x - if: type = api AND branch = master AND repo = kubernetes/ingress-nginx AND env(COMPONENT) = "ingress-controller" - script: - - make register-qemu - - .travis/publish.sh s390x - - stage: Publish docs - if: type = api AND branch = master AND repo = 
kubernetes/ingress-nginx AND env(COMPONENT) = "docs" - script: - - .travis/publish-docs.sh + - stage: Static Check + script: + - make static-check + - stage: Lua Unit Test + script: + - make lua-test + - stage: Coverage + script: + - make cover + - stage: e2e + if: (branch = master AND env(COMPONENT) != "docs") OR (type = pull_request AND commit_message !~ /(skip-e2e)/) + before_script: + - make e2e-test-image + - test/e2e/up.sh + script: + - KUBECONFIG=$(cat /tmp/kubeconfig) make e2e-test + # split builds to avoid job timeouts + - stage: publish amd64 + if: type = push AND branch = master AND repo = Shopify/ingress AND env(COMPONENT) = "ingress-controller" + script: + - .travis/publish.sh amd64 + - stage: publish arm + if: type = api AND branch = master AND repo = kubernetes/ingress-nginx AND env(COMPONENT) = "ingress-controller" + script: + - make register-qemu + - .travis/publish.sh arm + - stage: publish arm64 + if: type = api AND branch = master AND repo = kubernetes/ingress-nginx AND env(COMPONENT) = "ingress-controller" + script: + - make register-qemu + - .travis/publish.sh arm64 + - stage: publish ppc64le + if: type = api AND branch = master AND repo = kubernetes/ingress-nginx AND env(COMPONENT) = "ingress-controller" + script: + - make register-qemu + - .travis/publish.sh ppc64le + - stage: publish s390x + if: type = api AND branch = master AND repo = kubernetes/ingress-nginx AND env(COMPONENT) = "ingress-controller" + script: + - make register-qemu + - .travis/publish.sh s390x + - stage: Publish docs + if: type = api AND branch = master AND repo = kubernetes/ingress-nginx AND env(COMPONENT) = "docs" + script: + - .travis/publish-docs.sh diff --git a/Changelog.md b/Changelog.md index 822301055f..ca1cf3cb82 100644 --- a/Changelog.md +++ b/Changelog.md @@ -1,5 +1,123 @@ # Changelog +### 0.22.0 + +**Image:** `quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.22.0` + +_New Features:_ + +- NGINX 1.15.8 +- New balancer implementation: consistent 
hash subset +- Adds support for HTTP2 Push Preload annotation +- Allow to disable NGINX prometheus metrics +- New --disable-catch-all flag to ignore catch-all ingresses +- Add flag --metrics-per-host to make per-host metrics optional + +_Breaking changes:_ + +- Annotation `nginx.ingress.kubernetes.io/rewrite-target` has changed and will not behave as expected if you don't update them. + + Refer to [https://kubernetes.github.io/ingress-nginx/examples/rewrite/#rewrite-target](https://kubernetes.github.io/ingress-nginx/examples/rewrite/#rewrite-target) on how to change it. + + Refer to [https://github.com/kubernetes/ingress-nginx/pull/3174#issuecomment-455665710](https://github.com/kubernetes/ingress-nginx/pull/3174#issuecomment-455665710) on how to do seamless migration. + +- Annotations `nginx.ingress.kubernetes.io/add-base-url` and `nginx.ingress.kubernetes.io/base-url-scheme` were removed. + + Please check issue [#3174](https://github.com/kubernetes/ingress-nginx/pull/3174) for details. 
+ +- By default do not trust any client to extract true client IP address from X-Forwarded-For header using realip module (`use-forwarded-headers: "false"`) + +_Changes:_ + +- [X] [#3174](https://github.com/kubernetes/ingress-nginx/pull/3174) Generalize Rewrite Block Creation and Deprecate AddBaseUrl (not backwards compatible) +- [X] [#3240](https://github.com/kubernetes/ingress-nginx/pull/3240) Adds support for HTTP2 Push Preload annotation +- [X] [#3333](https://github.com/kubernetes/ingress-nginx/pull/3333) breaking change: by default do not trust any client +- [X] [#3342](https://github.com/kubernetes/ingress-nginx/pull/3342) Allow privilege escalation +- [X] [#3363](https://github.com/kubernetes/ingress-nginx/pull/3363) Document for cookie expires annotation +- [X] [#3396](https://github.com/kubernetes/ingress-nginx/pull/3396) New balancer implementation: consistent hash subset +- [X] [#3446](https://github.com/kubernetes/ingress-nginx/pull/3446) add more testing for mergeAlternativeBackends +- [X] [#3453](https://github.com/kubernetes/ingress-nginx/pull/3453) Monitor fixes +- [X] [#3455](https://github.com/kubernetes/ingress-nginx/pull/3455) Watch controller Pods and make then available in k8sStore +- [X] [#3465](https://github.com/kubernetes/ingress-nginx/pull/3465) Bump nginx-opentracing for gRPC support +- [X] [#3467](https://github.com/kubernetes/ingress-nginx/pull/3467) store ewma stats per backend +- [X] [#3470](https://github.com/kubernetes/ingress-nginx/pull/3470) Use opentracing_grpc_propagate_context when necessary +- [X] [#3474](https://github.com/kubernetes/ingress-nginx/pull/3474) Improve parsing of annotations and use of Ingress wrapper +- [X] [#3476](https://github.com/kubernetes/ingress-nginx/pull/3476) Fix nginx directory permissions +- [X] [#3477](https://github.com/kubernetes/ingress-nginx/pull/3477) clarify canary ingress 
+- [X] [#3478](https://github.com/kubernetes/ingress-nginx/pull/3478) delete unused buildLoadBalancingConfig +- [X] [#3487](https://github.com/kubernetes/ingress-nginx/pull/3487) dynamic certificate mode should support widlcard hosts +- [X] [#3488](https://github.com/kubernetes/ingress-nginx/pull/3488) Add probes to deployments used in e2e tests +- [X] [#3492](https://github.com/kubernetes/ingress-nginx/pull/3492) Fix data size validations +- [X] [#3494](https://github.com/kubernetes/ingress-nginx/pull/3494) Since dynamic mode only checking for 'return 503' is not valid anymore +- [X] [#3495](https://github.com/kubernetes/ingress-nginx/pull/3495) Adjust default timeout for e2e tests +- [X] [#3497](https://github.com/kubernetes/ingress-nginx/pull/3497) Wait for the right number of endpoints +- [X] [#3498](https://github.com/kubernetes/ingress-nginx/pull/3498) Update godeps +- [X] [#3501](https://github.com/kubernetes/ingress-nginx/pull/3501) be consistent with what Nginx supports +- [X] [#3503](https://github.com/kubernetes/ingress-nginx/pull/3503) compare error with error types from k8s.io/apimachinery/pkg/api/errors +- [X] [#3504](https://github.com/kubernetes/ingress-nginx/pull/3504) fix an ewma unit test +- [X] [#3505](https://github.com/kubernetes/ingress-nginx/pull/3505) Update lua configuration_data when number of controller pod change +- [X] [#3507](https://github.com/kubernetes/ingress-nginx/pull/3507) Remove temporal configuration file after a while +- [X] [#3508](https://github.com/kubernetes/ingress-nginx/pull/3508) Update nginx to 1.15.7 +- [X] [#3509](https://github.com/kubernetes/ingress-nginx/pull/3509) [1759] Ingress affinity session cookie with Secure flag for HTTPS +- [X] [#3512](https://github.com/kubernetes/ingress-nginx/pull/3512) Allow to disable NGINX metrics +- [X] 
[#3518](https://github.com/kubernetes/ingress-nginx/pull/3518) Fix log output format +- [X] [#3521](https://github.com/kubernetes/ingress-nginx/pull/3521) Fix a bug with Canary becoming main server +- [X] [#3522](https://github.com/kubernetes/ingress-nginx/pull/3522) {tcp,udp}-services cm appear twice +- [X] [#3525](https://github.com/kubernetes/ingress-nginx/pull/3525) make canary ingresses independent of the order they were applied +- [X] [#3530](https://github.com/kubernetes/ingress-nginx/pull/3530) Update nginx image +- [X] [#3532](https://github.com/kubernetes/ingress-nginx/pull/3532) Ignore updates of ingresses with invalid class +- [X] [#3536](https://github.com/kubernetes/ingress-nginx/pull/3536) Replace dockerfile entrypoint +- [X] [#3548](https://github.com/kubernetes/ingress-nginx/pull/3548) e2e test to ensure graceful shutdown does not lose requests +- [X] [#3551](https://github.com/kubernetes/ingress-nginx/pull/3551) Fix --enable-dynamic-certificates for nested subdomain +- [X] [#3553](https://github.com/kubernetes/ingress-nginx/pull/3553) handle_error_when_executing_diff +- [X] [#3562](https://github.com/kubernetes/ingress-nginx/pull/3562) Rename nginx.yaml to nginx.json +- [X] [#3566](https://github.com/kubernetes/ingress-nginx/pull/3566) Add Unit Tests for getIngressInformation +- [X] [#3569](https://github.com/kubernetes/ingress-nginx/pull/3569) fix status updated: make sure ingress.status is copied +- [X] [#3573](https://github.com/kubernetes/ingress-nginx/pull/3573) Update Certificate Generation Docs to not use MD5 +- [X] [#3581](https://github.com/kubernetes/ingress-nginx/pull/3581) lua randomseed per worker +- [X] [#3582](https://github.com/kubernetes/ingress-nginx/pull/3582) Sort ingresses by creation timestamp +- [X] [#3584](https://github.com/kubernetes/ingress-nginx/pull/3584) Update go to 1.11.4 +- [X] 
[#3586](https://github.com/kubernetes/ingress-nginx/pull/3586) Add --disable-catch-all option to disable catch-all server +- [X] [#3587](https://github.com/kubernetes/ingress-nginx/pull/3587) adjust dind istallation +- [X] [#3594](https://github.com/kubernetes/ingress-nginx/pull/3594) Add a flag to make per-host metrics optional +- [X] [#3596](https://github.com/kubernetes/ingress-nginx/pull/3596) Fix proxy_host variable configuration +- [X] [#3601](https://github.com/kubernetes/ingress-nginx/pull/3601) Update nginx to 1.15.8 +- [X] [#3602](https://github.com/kubernetes/ingress-nginx/pull/3602) Update nginx image +- [X] [#3604](https://github.com/kubernetes/ingress-nginx/pull/3604) Add an option to automatically set worker_connections based on worker_rlimit_nofile +- [X] [#3615](https://github.com/kubernetes/ingress-nginx/pull/3615) Pass k8s `Service` data through to the TCP balancer script. +- [X] [#3620](https://github.com/kubernetes/ingress-nginx/pull/3620) Added server alias to metrics +- [X] [#3624](https://github.com/kubernetes/ingress-nginx/pull/3624) Update nginx to fix geoip database deprecation +- [X] [#3625](https://github.com/kubernetes/ingress-nginx/pull/3625) Update nginx image +- [X] [#3633](https://github.com/kubernetes/ingress-nginx/pull/3633) Fix a bug in Ingress update handler +- [X] [#3634](https://github.com/kubernetes/ingress-nginx/pull/3634) canary by cookie should support hypen in cookie name +- [X] [#3635](https://github.com/kubernetes/ingress-nginx/pull/3635) Fix duplicate alternative backend merging +- [X] [#3637](https://github.com/kubernetes/ingress-nginx/pull/3637) Add support for redirect https to https (from-to-www-redirect) +- [X] [#3640](https://github.com/kubernetes/ingress-nginx/pull/3640) add limit connection status code +- [X] [#3641](https://github.com/kubernetes/ingress-nginx/pull/3641) 
Replace deprecated apiVersion in deploy folder +- [X] [#3643](https://github.com/kubernetes/ingress-nginx/pull/3643) Update nginx +- [X] [#3644](https://github.com/kubernetes/ingress-nginx/pull/3644) Update nginx image +- [X] [#3648](https://github.com/kubernetes/ingress-nginx/pull/3648) Remove stickyness cookie domain from Lua balancer to match old behavior +- [X] [#3649](https://github.com/kubernetes/ingress-nginx/pull/3649) Empty access_by_lua_block breaks satisfy any +- [X] [#3655](https://github.com/kubernetes/ingress-nginx/pull/3655) Remove flag sort-backends +- [X] [#3656](https://github.com/kubernetes/ingress-nginx/pull/3656) Change default value of flag for ssl chain completion +- [X] [#3660](https://github.com/kubernetes/ingress-nginx/pull/3660) Revert max-worker-connections default value +- [X] [#3664](https://github.com/kubernetes/ingress-nginx/pull/3664) Fix invalid validation creating prometheus valid host values + +_Documentation:_ + +- [X] [#3513](https://github.com/kubernetes/ingress-nginx/pull/3513) Revert removal of TCP and UDP support configmaps in mandatroy manifest +- [X] [#3456](https://github.com/kubernetes/ingress-nginx/pull/3456) Revert TCP/UDP documentation removal and links +- [X] [#3482](https://github.com/kubernetes/ingress-nginx/pull/3482) Annotations doc links: minor fixes and unification +- [X] [#3491](https://github.com/kubernetes/ingress-nginx/pull/3491) Update example to use latest Dashboard version. 
+- [X] [#3510](https://github.com/kubernetes/ingress-nginx/pull/3510) Update mkdocs [skip ci] +- [X] [#3516](https://github.com/kubernetes/ingress-nginx/pull/3516) Fix error in configmap yaml definition +- [X] [#3575](https://github.com/kubernetes/ingress-nginx/pull/3575) Add documentation for spec.rules.host format +- [X] [#3577](https://github.com/kubernetes/ingress-nginx/pull/3577) Add standard labels to namespace specs +- [X] [#3592](https://github.com/kubernetes/ingress-nginx/pull/3592) Add inside the User Guide documentation section a basic usage section and example +- [X] [#3605](https://github.com/kubernetes/ingress-nginx/pull/3605) Fix CLA URLs +- [X] [#3627](https://github.com/kubernetes/ingress-nginx/pull/3627) Typo: docs/examples/rewrite/README.md +- [X] [#3632](https://github.com/kubernetes/ingress-nginx/pull/3632) Fixed: error parsing with-rbac.yaml: error converting YAML to JSON + ### 0.21.0 **Image:** `quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.21.0` diff --git a/Gopkg.lock b/Gopkg.lock index a734b2c0d4..917b75ece4 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -269,12 +269,20 @@ version = "v1.0.0" [[projects]] - digest = "1:9b9758cc00f332bd95147c2e343dccd662ab02cb716a50a89e6b6563ea2df314" + digest = "1:aaa38889f11896ee3644d77e17dc7764cc47f5f3d3b488268df2af2b52541c5f" name = "github.com/imdario/mergo" packages = ["."] pruneopts = "NUT" - revision = "7fe0c75c13abdee74b09fcacef5ea1c6bba6a874" - version = "0.2.4" + revision = "7c29201646fa3de8506f701213473dd407f19646" + version = "v0.3.7" + +[[projects]] + digest = "1:406338ad39ab2e37b7f4452906442a3dbf0eb3379dd1f06aafb5c07e769a5fbb" + name = "github.com/inconshreveable/mousetrap" + packages = ["."] + pruneopts = "NUT" + revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" + version = "v1.0" [[projects]] digest = "1:8e36686e8b139f8fe240c1d5cf3a145bc675c22ff8e707857cdd3ae17b00d728" @@ -554,6 +562,14 @@ 
revision = "d40851caa0d747393da1ffb28f7f9d8b4eeffebd" version = "v1.1.2" +[[projects]] + digest = "1:343d44e06621142ab09ae0c76c1799104cdfddd3ffb445d78b1adf8dc3ffaf3d" + name = "github.com/spf13/cobra" + packages = ["."] + pruneopts = "NUT" + revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385" + version = "v0.0.3" + [[projects]] digest = "1:9d8420bbf131d1618bde6530af37c3799340d3762cc47210c1d9532a4c3a2779" name = "github.com/spf13/pflag" @@ -562,6 +578,14 @@ revision = "298182f68c66c05229eb03ac171abe6e309ee79a" version = "v1.0.3" +[[projects]] + branch = "master" + digest = "1:1e4c6579336a71d60c374c18fefca05377a3b241d5e0b246caa6efd37a1d1369" + name = "github.com/tv42/httpunix" + packages = ["."] + pruneopts = "NUT" + revision = "b75d8614f926c077e48d85f1f8f7885b758c6225" + [[projects]] digest = "1:c10994a08ed2ff2cc7611d03ded8bb5f782096880b2daab391adbd9ab95a1764" name = "github.com/zakjan/cert-chain-resolver" @@ -856,7 +880,7 @@ "storage/v1beta1", ] pruneopts = "NUT" - revision = "kubernetes-1.13.0-rc.2" + revision = "kubernetes-1.13.3" [[projects]] digest = "1:501a73762f1b2c4530206ffb657b39d8b58a9b40280d30e4509ae1232767962c" @@ -869,10 +893,10 @@ "pkg/client/clientset/clientset/typed/apiextensions/v1beta1", ] pruneopts = "NUT" - revision = "kubernetes-1.13.0-rc.2" + revision = "kubernetes-1.13.3" [[projects]] - digest = "1:692e27ed8a5eb2d74bde52d323d428814cd9a6e0f726d02ffd60fda7819e1ee7" + digest = "1:8e9cf9f8e6ced4caf9abcf7191f46d81d0ce7cbdb1ce78a17944abe0020edc50" name = "k8s.io/apimachinery" packages = [ "pkg/api/errors", @@ -881,6 +905,7 @@ "pkg/apis/meta/internalversion", "pkg/apis/meta/v1", "pkg/apis/meta/v1/unstructured", + "pkg/apis/meta/v1/unstructured/unstructuredscheme", "pkg/apis/meta/v1beta1", "pkg/conversion", "pkg/conversion/queryparams", @@ -924,7 +949,7 @@ "third_party/forked/golang/reflect", ] pruneopts = "NUT" - revision = "kubernetes-1.13.0-rc.2" + revision = "kubernetes-1.13.3" [[projects]] digest = 
"1:cc0487260dc4ffb2b513273ad8438497b8df2d8c0de90aaf03d22cc5b58e3fe1" @@ -934,10 +959,21 @@ "pkg/util/logs", ] pruneopts = "NUT" - revision = "kubernetes-1.13.0-rc.2" + revision = "kubernetes-1.13.3" + +[[projects]] + digest = "1:63793246976569a95e534c731e79cc555dabee6f8efa29a0b28ca33f23b7e28b" + name = "k8s.io/cli-runtime" + packages = [ + "pkg/genericclioptions", + "pkg/genericclioptions/printers", + "pkg/genericclioptions/resource", + ] + pruneopts = "NUT" + revision = "kubernetes-1.13.3" [[projects]] - digest = "1:14961132526c5e588ccfa30efd6c977db308c1b1fb83ad4043c3a92c961521ae" + digest = "1:8ed4701154a41791914e89f9aad23ec76c8826824af285c5606dd4afd9ac2f25" name = "k8s.io/client-go" packages = [ "discovery", @@ -1095,6 +1131,7 @@ "plugin/pkg/client/auth/openstack", "rest", "rest/watch", + "restmapper", "testing", "third_party/forked/golang/template", "tools/auth", @@ -1124,7 +1161,7 @@ "util/workqueue", ] pruneopts = "NUT" - revision = "kubernetes-1.13.0-rc.2" + revision = "kubernetes-1.13.3" [[projects]] branch = "master" @@ -1164,7 +1201,7 @@ revision = "0317810137be915b9cf888946c6e115c1bfac693" [[projects]] - digest = "1:df9bd8d59539e980d5d565b00a6d45e445e62d8c89d60fe0fa520ad6e54d32e4" + digest = "1:a6f576f3b3e363c56c2b5917d4fb81681cf5d48a6d7a1ec01002bd730720aa6c" name = "k8s.io/kubernetes" packages = [ "pkg/api/legacyscheme", @@ -1186,7 +1223,7 @@ "third_party/forked/golang/expansion", ] pruneopts = "NUT" - revision = "v1.13.0-rc.2" + revision = "v1.13.3" [[projects]] branch = "master" @@ -1229,13 +1266,16 @@ "github.com/prometheus/client_golang/prometheus/promhttp", "github.com/prometheus/client_model/go", "github.com/prometheus/common/expfmt", + "github.com/spf13/cobra", "github.com/spf13/pflag", + "github.com/tv42/httpunix", "github.com/zakjan/cert-chain-resolver/certUtil", "gopkg.in/fsnotify/fsnotify.v1", "gopkg.in/go-playground/pool.v3", "k8s.io/api/apps/v1beta1", "k8s.io/api/core/v1", 
"k8s.io/api/extensions/v1beta1", + "k8s.io/api/rbac/v1", "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset", "k8s.io/apimachinery/pkg/api/errors", "k8s.io/apimachinery/pkg/apis/meta/v1", @@ -1252,11 +1292,13 @@ "k8s.io/apimachinery/pkg/watch", "k8s.io/apiserver/pkg/server/healthz", "k8s.io/apiserver/pkg/util/logs", + "k8s.io/cli-runtime/pkg/genericclioptions", "k8s.io/client-go/informers", "k8s.io/client-go/kubernetes", "k8s.io/client-go/kubernetes/fake", "k8s.io/client-go/kubernetes/scheme", "k8s.io/client-go/kubernetes/typed/core/v1", + "k8s.io/client-go/kubernetes/typed/extensions/v1beta1", "k8s.io/client-go/plugin/pkg/client/auth", "k8s.io/client-go/rest", "k8s.io/client-go/tools/cache", @@ -1265,6 +1307,7 @@ "k8s.io/client-go/tools/leaderelection", "k8s.io/client-go/tools/leaderelection/resourcelock", "k8s.io/client-go/tools/record", + "k8s.io/client-go/tools/remotecommand", "k8s.io/client-go/util/cert", "k8s.io/client-go/util/flowcontrol", "k8s.io/client-go/util/workqueue", diff --git a/Gopkg.toml b/Gopkg.toml index 5ad1e3b386..c2557a5f6f 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -20,6 +20,8 @@ # name = "github.com/x/y" # version = "2.4.0" +ignored = ["github.com/kubernetes/ingress-nginx/images/grpc-fortune-teller*"] + [prune] non-go = true go-tests = true @@ -53,7 +55,7 @@ [[constraint]] name = "github.com/imdario/mergo" - version = "0.2.4" + version = "0.3.7" [[constraint]] branch = "master" @@ -97,24 +99,28 @@ [[constraint]] name = "k8s.io/kubernetes" - revision = "v1.13.0-rc.2" + revision = "v1.13.3" [[constraint]] name = "k8s.io/api" - revision = "kubernetes-1.13.0-rc.2" + revision = "kubernetes-1.13.3" [[constraint]] name = "k8s.io/apimachinery" - revision = "kubernetes-1.13.0-rc.2" + revision = "kubernetes-1.13.3" [[constraint]] name = "k8s.io/client-go" - revision = "kubernetes-1.13.0-rc.2" + revision = "kubernetes-1.13.3" [[constraint]] name = "k8s.io/apiextensions-apiserver" - revision = 
"kubernetes-1.13.0-rc.2" + revision = "kubernetes-1.13.3" [[constraint]] name = "k8s.io/apiserver" - revision = "kubernetes-1.13.0-rc.2" + revision = "kubernetes-1.13.3" + +[[constraint]] + name = "k8s.io/cli-runtime" + revision = "kubernetes-1.13.3" \ No newline at end of file diff --git a/Makefile b/Makefile index fc8c34f0fb..f15ca0e9dc 100644 --- a/Makefile +++ b/Makefile @@ -26,11 +26,9 @@ GOHOSTOS ?= $(shell go env GOHOSTOS) # Allow limiting the scope of the e2e tests. By default run everything FOCUS ?= .* # number of parallel test -E2E_NODES ?= 4 -# slow test only if takes > 40s -SLOW_E2E_THRESHOLD ?= 40 - -NODE_IP ?= $(shell minikube ip) +E2E_NODES ?= 8 +# slow test only if takes > 50s +SLOW_E2E_THRESHOLD ?= 50 ifeq ($(GOHOSTOS),darwin) SED_I=sed -i '' @@ -61,7 +59,7 @@ IMAGE = $(REGISTRY)/$(IMGNAME) MULTI_ARCH_IMG = $(IMAGE)-$(ARCH) # Set default base image dynamically for each arch -BASEIMAGE?=quay.io/kubernetes-ingress-controller/nginx-$(ARCH):0.75 +BASEIMAGE?=quay.io/kubernetes-ingress-controller/nginx-$(ARCH):0.79 ifeq ($(ARCH),arm64) QEMUARCH=aarch64 @@ -105,7 +103,7 @@ container: clean-container .container-$(ARCH) @echo "+ Copying artifact to temporary directory" mkdir -p $(TEMP_DIR)/rootfs cp bin/$(ARCH)/nginx-ingress-controller $(TEMP_DIR)/rootfs/nginx-ingress-controller - + cp bin/$(ARCH)/dbg $(TEMP_DIR)/rootfs/dbg @echo "+ Building container image $(MULTI_ARCH_IMG):$(TAG)" cp -RP ./* $(TEMP_DIR) $(SED_I) "s|BASEIMAGE|$(BASEIMAGE)|g" $(DOCKERFILE) @@ -155,6 +153,12 @@ build: GOBUILD_FLAGS="$(GOBUILD_FLAGS)" \ build/go-in-docker.sh build/build.sh +.PHONY: build-plugin +build-plugin: + @$(DEF_VARS) \ + GOBUILD_FLAGS="$(GOBUILD_FLAGS)" \ + build/go-in-docker.sh build/build-plugin.sh + .PHONY: clean clean: rm -rf bin/ .gocache/ .env @@ -167,7 +171,6 @@ static-check: .PHONY: test test: @$(DEF_VARS) \ - NODE_IP=$(NODE_IP) \ DOCKER_OPTS="-i --net=host" \ build/go-in-docker.sh build/test.sh @@ -179,13 +182,31 @@ lua-test: .PHONY: e2e-test e2e-test: - 
@$(DEF_VARS) \ - FOCUS=$(FOCUS) \ - E2E_NODES=$(E2E_NODES) \ - DOCKER_OPTS="-i --net=host" \ - NODE_IP=$(NODE_IP) \ - SLOW_E2E_THRESHOLD=$(SLOW_E2E_THRESHOLD) \ - build/go-in-docker.sh build/e2e-tests.sh + echo "Granting permissions to ingress-nginx e2e service account..." + kubectl create serviceaccount ingress-nginx-e2e || true + kubectl create clusterrolebinding permissive-binding \ + --clusterrole=cluster-admin \ + --user=admin \ + --user=kubelet \ + --serviceaccount=default:ingress-nginx-e2e || true + + kubectl run --rm -i --tty \ + --attach \ + --restart=Never \ + --generator=run-pod/v1 \ + --env="E2E_NODES=$(E2E_NODES)" \ + --env="FOCUS=$(FOCUS)" \ + --env="SLOW_E2E_THRESHOLD=$(SLOW_E2E_THRESHOLD)" \ + --overrides='{ "apiVersion": "v1", "spec":{"serviceAccountName": "ingress-nginx-e2e"}}' \ + e2e --image=nginx-ingress-controller:e2e + +.PHONY: e2e-test-image +e2e-test-image: + @$(DEF_VARS) \ + DOCKER_OPTS="-i --net=host" \ + build/go-in-docker.sh build/build-e2e.sh + + make -C test/e2e-image .PHONY: cover cover: diff --git a/OWNERS b/OWNERS index a31555ef69..f55b76c489 100644 --- a/OWNERS +++ b/OWNERS @@ -4,9 +4,7 @@ approvers: - ingress-nginx-admins - ingress-nginx-maintainers - ElvinEfendi - - antoineco reviewers: - aledbf - ElvinEfendi - - antoineco diff --git a/build/build-e2e.sh b/build/build-e2e.sh new file mode 100755 index 0000000000..bf73e34c95 --- /dev/null +++ b/build/build-e2e.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +ginkgo build ./test/e2e diff --git a/build/build-plugin.sh b/build/build-plugin.sh new file mode 100755 index 0000000000..e749938a02 --- /dev/null +++ b/build/build-plugin.sh @@ -0,0 +1,73 @@ +#!/bin/bash + +# Copyright 2018 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +declare -a mandatory +mandatory=( + PKG + ARCH + GIT_COMMIT + REPO_INFO + TAG +) + +missing=false +for var in "${mandatory[@]}"; do + if [[ -z "${!var:-}" ]]; then + echo "Environment variable $var must be set" + missing=true + fi +done + +if [ "$missing" = true ]; then + exit 1 +fi + +export CGO_ENABLED=0 + +release=cmd/plugin/release + +function build_for_arch(){ + os=$1 + arch=$2 + + env GOOS=${os} GOARCH=${arch} go build \ + ${GOBUILD_FLAGS} \ + -ldflags "-s -w \ + -X ${PKG}/version.RELEASE=${TAG} \ + -X ${PKG}/version.COMMIT=${GIT_COMMIT} \ + -X ${PKG}/version.REPO=${REPO_INFO}" \ + -o ${release}/kubectl-ingress_nginx ${PKG}/cmd/plugin + + tar -C ${release} -zcvf ${release}/kubectl-ingress_nginx-${os}-${arch}.tar.gz kubectl-ingress_nginx + rm ${release}/kubectl-ingress_nginx + hash=`sha256sum ${release}/kubectl-ingress_nginx-${os}-${arch}.tar.gz | awk '{ print $1 }'` + sed -i "s/%%%shasum_${os}_${arch}%%%/${hash}/g" ${release}/ingress-nginx.yaml +} + +rm -rf 
${release} +mkdir ${release} + +cp cmd/plugin/ingress-nginx.yaml.tmpl ${release}/ingress-nginx.yaml + +sed -i "s/%%%tag%%%/${TAG}/g" ${release}/ingress-nginx.yaml + +build_for_arch darwin amd64 +build_for_arch linux amd64 +build_for_arch windows amd64 diff --git a/build/build.sh b/build/build.sh index 6d2e39cbd5..0fd952a11a 100755 --- a/build/build.sh +++ b/build/build.sh @@ -48,3 +48,11 @@ go build \ -X ${PKG}/version.COMMIT=${GIT_COMMIT} \ -X ${PKG}/version.REPO=${REPO_INFO}" \ -o bin/${ARCH}/nginx-ingress-controller ${PKG}/cmd/nginx + +go build \ + ${GOBUILD_FLAGS} \ + -ldflags "-s -w \ + -X ${PKG}/version.RELEASE=${TAG} \ + -X ${PKG}/version.COMMIT=${GIT_COMMIT} \ + -X ${PKG}/version.REPO=${REPO_INFO}" \ + -o bin/${ARCH}/dbg ${PKG}/cmd/dbg diff --git a/build/dev-env.sh b/build/dev-env.sh index bfd9ff1fd8..4c74926f83 100755 --- a/build/dev-env.sh +++ b/build/dev-env.sh @@ -22,6 +22,8 @@ SKIP_MINIKUBE_START=${SKIP_MINIKUBE_START:-} NAMESPACE="${NAMESPACE:-ingress-nginx}" echo "NAMESPACE is set to ${NAMESPACE}" +kubectl config use-context minikube + export TAG=dev export ARCH=amd64 export REGISTRY=${REGISTRY:-ingress-controller} @@ -29,7 +31,7 @@ export REGISTRY=${REGISTRY:-ingress-controller} DEV_IMAGE=${REGISTRY}/nginx-ingress-controller:${TAG} if [ -z "${SKIP_MINIKUBE_START}" ]; then - test $(minikube status | grep Running | wc -l) -eq 2 && $(minikube status | grep -q 'Correctly Configured') || minikube start \ + test $(minikube status | grep Running | wc -l) -ge 2 && $(minikube status | grep -q 'Correctly Configured') || minikube start \ --extra-config=kubelet.sync-frequency=1s \ --extra-config=apiserver.authorization-mode=RBAC diff --git a/build/e2e-tests.sh b/build/e2e-tests.sh deleted file mode 100755 index cdd6f939d1..0000000000 --- a/build/e2e-tests.sh +++ /dev/null @@ -1,69 +0,0 @@ -#!/bin/bash - -# Copyright 2018 The Kubernetes Authors. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -o errexit -set -o nounset -set -o pipefail - -declare -a mandatory -mandatory=( - NODE_IP - SLOW_E2E_THRESHOLD - PKG - FOCUS - E2E_NODES -) - -missing=false -for var in ${mandatory[@]}; do - if [[ -z "${!var+x}" ]]; then - echo "Environment variable $var must be set" - missing=true - fi -done - -if [ "$missing" = true ];then - exit 1 -fi - -SCRIPT_ROOT=$(dirname ${BASH_SOURCE})/.. - -ginkgo build ./test/e2e - -echo "Running e2e test suite..." -ginkgo \ - -randomizeSuites \ - -randomizeAllSpecs \ - -flakeAttempts=2 \ - -focus=${FOCUS} \ - -skip="\[Serial\]" \ - -p \ - -trace \ - -nodes=${E2E_NODES} \ - -slowSpecThreshold=${SLOW_E2E_THRESHOLD} \ - test/e2e/e2e.test - -echo "Running e2e test suite with tests that require serial execution..." 
-ginkgo \ - -randomizeSuites \ - -randomizeAllSpecs \ - -flakeAttempts=2 \ - -focus="\[Serial\]" \ - -p \ - -trace \ - -nodes=1 \ - -slowSpecThreshold=${SLOW_E2E_THRESHOLD} \ - test/e2e/e2e.test diff --git a/build/go-in-docker.sh b/build/go-in-docker.sh index cf4c21ecbe..ee7b4eb95c 100755 --- a/build/go-in-docker.sh +++ b/build/go-in-docker.sh @@ -40,7 +40,7 @@ if [ "$missing" = true ];then exit 1 fi -E2E_IMAGE=quay.io/kubernetes-ingress-controller/e2e:v01092019-b433108ea +E2E_IMAGE=quay.io/kubernetes-ingress-controller/e2e:v02252019-286c1f306 DOCKER_OPTS=${DOCKER_OPTS:-""} @@ -50,18 +50,13 @@ tee .env << EOF PKG=${PKG:-""} ARCH=${ARCH:-""} GIT_COMMIT=${GIT_COMMIT:-""} -E2E_NODES=${E2E_NODES:-4} -FOCUS=${FOCUS:-.*} TAG=${TAG:-"0.0"} HOME=${HOME:-/root} -KUBECONFIG=${HOME}/.kube/config GOARCH=${GOARCH} GOBUILD_FLAGS=${GOBUILD_FLAGS:-"-v"} PWD=${PWD} BUSTED_ARGS=${BUSTED_ARGS:-""} REPO_INFO=${REPO_INFO:-local} -NODE_IP=${NODE_IP:-127.0.0.1} -SLOW_E2E_THRESHOLD=${SLOW_E2E_THRESHOLD:-40} EOF MINIKUBE_PATH=${HOME}/.minikube diff --git a/cmd/dbg/main.go b/cmd/dbg/main.go new file mode 100644 index 0000000000..7b9657ab36 --- /dev/null +++ b/cmd/dbg/main.go @@ -0,0 +1,241 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "github.com/spf13/cobra" + "k8s.io/ingress-nginx/internal/nginx" + "os" +) + +const ( + backendsPath = "/configuration/backends" + generalPath = "/configuration/general" + certsPath = "/configuration/certs" +) + +func main() { + rootCmd := &cobra.Command{ + Use: "dbg", + Short: "dbg is a tool for quickly inspecting the state of the nginx instance", + } + + backendsCmd := &cobra.Command{ + Use: "backends", + Short: "Inspect the dynamically-loaded backends information", + } + rootCmd.AddCommand(backendsCmd) + + backendsAllCmd := &cobra.Command{ + Use: "all", + Short: "Output the all dynamic backend information as a JSON array", + Run: func(cmd *cobra.Command, args []string) { + backendsAll() + }, + } + backendsCmd.AddCommand(backendsAllCmd) + + backendsListCmd := &cobra.Command{ + Use: "list", + Short: "Output a newline-separated list of the backend names", + Run: func(cmd *cobra.Command, args []string) { + backendsList() + }, + } + backendsCmd.AddCommand(backendsListCmd) + + backendsGetCmd := &cobra.Command{ + Use: "get [backend name]", + Short: "Output the backend information only for the backend that has this name", + Args: cobra.ExactArgs(1), + Run: func(cmd *cobra.Command, args []string) { + backendsGet(args[0]) + }, + } + backendsCmd.AddCommand(backendsGetCmd) + + certCmd := &cobra.Command{ + Use: "certs", + Short: "Inspect dynamic SSL certificates", + } + + certGetCmd := &cobra.Command{ + Use: "get [hostname]", + Short: "Get the dynamically-loaded certificate information for the given hostname", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + certGet(args[0]) + return nil + }, + } + certCmd.AddCommand(certGetCmd) + + rootCmd.AddCommand(certCmd) + + generalCmd := &cobra.Command{ + Use: "general", + Short: "Output the general dynamic lua state", + Run: func(cmd *cobra.Command, args []string) { + general() + }, + } + rootCmd.AddCommand(generalCmd) + 
+ confCmd := &cobra.Command{ + Use: "conf", + Short: "Dump the contents of /etc/nginx/nginx.conf", + Run: func(cmd *cobra.Command, args []string) { + readNginxConf() + }, + } + rootCmd.AddCommand(confCmd) + + if err := rootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(1) + } + +} + +func backendsAll() { + statusCode, body, requestErr := nginx.NewGetStatusRequest(backendsPath) + if requestErr != nil { + fmt.Println(requestErr) + return + } + if statusCode != 200 { + fmt.Printf("Nginx returned code %v\n", statusCode) + return + } + + var prettyBuffer bytes.Buffer + indentErr := json.Indent(&prettyBuffer, body, "", " ") + if indentErr != nil { + fmt.Println(indentErr) + return + } + + fmt.Println(string(prettyBuffer.Bytes())) +} + +func backendsList() { + statusCode, body, requestErr := nginx.NewGetStatusRequest(backendsPath) + if requestErr != nil { + fmt.Println(requestErr) + return + } + if statusCode != 200 { + fmt.Printf("Nginx returned code %v\n", statusCode) + return + } + + var f interface{} + unmarshalErr := json.Unmarshal(body, &f) + if unmarshalErr != nil { + fmt.Println(unmarshalErr) + return + } + backends := f.([]interface{}) + + for _, backendi := range backends { + backend := backendi.(map[string]interface{}) + fmt.Println(backend["name"].(string)) + } +} + +func backendsGet(name string) { + statusCode, body, requestErr := nginx.NewGetStatusRequest(backendsPath) + if requestErr != nil { + fmt.Println(requestErr) + return + } + if statusCode != 200 { + fmt.Printf("Nginx returned code %v\n", statusCode) + return + } + + var f interface{} + unmarshalErr := json.Unmarshal(body, &f) + if unmarshalErr != nil { + fmt.Println(unmarshalErr) + return + } + backends := f.([]interface{}) + + for _, backendi := range backends { + backend := backendi.(map[string]interface{}) + if backend["name"].(string) == name { + printed, _ := json.MarshalIndent(backend, "", " ") + fmt.Println(string(printed)) + return + } + } + fmt.Println("A backend of this name was 
not found.") +} + +func certGet(host string) { + statusCode, body, requestErr := nginx.NewGetStatusRequest(certsPath + "?hostname=" + host) + if requestErr != nil { + fmt.Println(requestErr) + return + } + + if statusCode == 200 { + fmt.Print(string(body)) + return + } else if statusCode != 404 { + fmt.Printf("Nginx returned code %v\n", statusCode) + fmt.Println(string(body)) + return + } + + fmt.Printf("No cert found for host %v\n", host) +} + +func general() { + statusCode, body, requestErr := nginx.NewGetStatusRequest(generalPath) + if requestErr != nil { + fmt.Println(requestErr) + return + } + if statusCode != 200 { + fmt.Printf("Nginx returned code %v\n", statusCode) + return + } + + var prettyBuffer bytes.Buffer + indentErr := json.Indent(&prettyBuffer, body, "", " ") + if indentErr != nil { + fmt.Println(indentErr) + return + } + + fmt.Println(string(prettyBuffer.Bytes())) +} + +func readNginxConf() { + conf, err := nginx.ReadNginxConf() + if err != nil { + fmt.Println(err) + return + } + + fmt.Println(conf) +} diff --git a/cmd/nginx/flags.go b/cmd/nginx/flags.go index df5a8167ba..cdf9afb9e9 100644 --- a/cmd/nginx/flags.go +++ b/cmd/nginx/flags.go @@ -31,6 +31,7 @@ import ( "k8s.io/ingress-nginx/internal/ingress/controller" ngx_config "k8s.io/ingress-nginx/internal/ingress/controller/config" ing_net "k8s.io/ingress-nginx/internal/net" + "k8s.io/ingress-nginx/internal/nginx" ) func parseFlags() (bool, *controller.Configuration, error) { @@ -151,7 +152,7 @@ Feature backed by OpenResty Lua libraries. 
Requires that OCSP stapling is not en httpPort = flags.Int("http-port", 80, `Port to use for servicing HTTP traffic.`) httpsPort = flags.Int("https-port", 443, `Port to use for servicing HTTPS traffic.`) - statusPort = flags.Int("status-port", 18080, `Port to use for exposing NGINX status pages.`) + _ = flags.Int("status-port", 18080, `Port to use for exposing NGINX status pages.`) sslProxyPort = flags.Int("ssl-passthrough-proxy-port", 442, `Port to use internally for SSL Passthrough.`) defServerPort = flags.Int("default-server-port", 8181, `Port to use for exposing the default server (catch-all).`) healthzPort = flags.Int("healthz-port", 10254, "Port to use for the healthz endpoint.") @@ -160,7 +161,7 @@ Feature backed by OpenResty Lua libraries. Requires that OCSP stapling is not en `Disable support for catch-all Ingresses`) ) - flags.MarkDeprecated("sort-backends", "Feature removed because of the lua load balancer that removed the need of reloads for change in endpoints") + flags.MarkDeprecated("status-port", `The status port is a unix socket now.`) flag.Set("logtostderr", "true") @@ -200,10 +201,6 @@ Feature backed by OpenResty Lua libraries. Requires that OCSP stapling is not en return false, nil, fmt.Errorf("Port %v is already in use. Please check the flag --https-port", *httpsPort) } - if !ing_net.IsPortAvailable(*statusPort) { - return false, nil, fmt.Errorf("Port %v is already in use. Please check the flag --status-port", *statusPort) - } - if !ing_net.IsPortAvailable(*defServerPort) { return false, nil, fmt.Errorf("Port %v is already in use. Please check the flag --default-server-port", *defServerPort) } @@ -224,6 +221,8 @@ Feature backed by OpenResty Lua libraries. 
Requires that OCSP stapling is not en return false, nil, fmt.Errorf("Flags --publish-service and --publish-status-address are mutually exclusive") } + nginx.HealthPath = *defHealthzURL + config := &controller.Configuration{ APIServerHost: *apiserverHost, KubeConfigFile: *kubeConfigFile, @@ -241,7 +240,6 @@ Feature backed by OpenResty Lua libraries. Requires that OCSP stapling is not en TCPConfigMapName: *tcpConfigMapName, UDPConfigMapName: *udpConfigMapName, DefaultSSLCertificate: *defSSLCertificate, - DefaultHealthzURL: *defHealthzURL, HealthCheckTimeout: *healthCheckTimeout, PublishService: *publishSvc, PublishStatusAddress: *publishStatusAddress, @@ -256,7 +254,6 @@ Feature backed by OpenResty Lua libraries. Requires that OCSP stapling is not en HTTP: *httpPort, HTTPS: *httpsPort, SSLProxy: *sslProxyPort, - Status: *statusPort, }, DisableCatchAll: *disableCatchAll, } diff --git a/cmd/nginx/main.go b/cmd/nginx/main.go index 5f5b4b9cb2..0ccbd6ed2e 100644 --- a/cmd/nginx/main.go +++ b/cmd/nginx/main.go @@ -131,7 +131,7 @@ func main() { mc := metric.NewDummyCollector() if conf.EnableMetrics { - mc, err = metric.NewCollector(conf.ListenPorts.Status, conf.MetricsPerHost, reg) + mc, err = metric.NewCollector(conf.MetricsPerHost, reg) if err != nil { klog.Fatalf("Error creating prometheus collector: %v", err) } diff --git a/cmd/plugin/ingress-nginx.yaml.tmpl b/cmd/plugin/ingress-nginx.yaml.tmpl new file mode 100644 index 0000000000..b7ccebd723 --- /dev/null +++ b/cmd/plugin/ingress-nginx.yaml.tmpl @@ -0,0 +1,40 @@ +apiVersion: krew.googlecontainertools.github.com/v1alpha2 +kind: Plugin +metadata: + name: ingress-nginx +spec: + shortDescription: Interact with ingress-nginx + description: | + The official kubectl plugin for ingress-nginx. 
+ version: %%%tag%%% + platforms: + - uri: https://github.com/kubernetes/ingress-nginx/releases/download/nginx-%%%tag%%%/kubectl-ingress_nginx-darwin-amd64.tar.gz + sha256: %%%shasum_darwin_amd64%%% + files: + - from: "*" + to: "." + bin: "./kubectl-ingress_nginx" + selector: + matchLabels: + os: darwin + arch: amd64 + - uri: https://github.com/kubernetes/ingress-nginx/releases/download/nginx-%%%tag%%%/kubectl-ingress_nginx-linux-amd64.tar.gz + sha256: %%%shasum_linux_amd64%%% + files: + - from: "*" + to: "." + bin: "./kubectl-ingress_nginx" + selector: + matchLabels: + os: linux + arch: amd64 + - uri: https://github.com/kubernetes/ingress-nginx/releases/download/nginx-%%%tag%%%/kubectl-ingress_nginx-windows-amd64.tar.gz + sha256: %%%shasum_windows_amd64%%% + files: + - from: "*" + to: "." + bin: "./kubectl-ingress_nginx" + selector: + matchLabels: + os: windows + arch: amd64 diff --git a/cmd/plugin/main.go b/cmd/plugin/main.go new file mode 100644 index 0000000000..b5e851cb35 --- /dev/null +++ b/cmd/plugin/main.go @@ -0,0 +1,416 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "fmt" + "github.com/spf13/cobra" + "os" + "strings" + "text/tabwriter" + + "k8s.io/api/extensions/v1beta1" + "k8s.io/cli-runtime/pkg/genericclioptions" + + //Just importing this is supposed to allow cloud authentication + // eg GCP, AWS, Azure ... 
+ _ "k8s.io/client-go/plugin/pkg/client/auth" + "k8s.io/ingress-nginx/cmd/plugin/request" + "k8s.io/ingress-nginx/cmd/plugin/util" + "k8s.io/ingress-nginx/internal/nginx" +) + +func main() { + rootCmd := &cobra.Command{ + Use: "ingress-nginx", + Short: "A kubectl plugin for inspecting your ingress-nginx deployments", + } + + // Respect some basic kubectl flags like --namespace + flags := genericclioptions.NewConfigFlags() + flags.AddFlags(rootCmd.PersistentFlags()) + + ingCmd := &cobra.Command{ + Use: "ingresses", + Aliases: []string{"ingress", "ing"}, + Short: "Provide a short summary of all of the ingress definitions", + RunE: func(cmd *cobra.Command, args []string) error { + host, err := cmd.Flags().GetString("host") + if err != nil { + return err + } + + allNamespaces, err := cmd.Flags().GetBool("all-namespaces") + if err != nil { + return err + } + + util.PrintError(ingresses(flags, host, allNamespaces)) + return nil + }, + } + ingCmd.Flags().String("host", "", "Show just the ingress definitions for this hostname") + ingCmd.Flags().Bool("all-namespaces", false, "Find ingress definitions from all namespaces") + rootCmd.AddCommand(ingCmd) + + confCmd := &cobra.Command{ + Use: "conf", + Short: "Inspect the generated nginx.conf", + RunE: func(cmd *cobra.Command, args []string) error { + host, err := cmd.Flags().GetString("host") + if err != nil { + return err + } + + pod, err := cmd.Flags().GetString("pod") + if err != nil { + return err + } + + util.PrintError(conf(flags, host, pod)) + return nil + }, + } + confCmd.Flags().String("host", "", "Print just the server block with this hostname") + confCmd.Flags().String("pod", "", "Query a particular ingress-nginx pod") + rootCmd.AddCommand(confCmd) + + generalCmd := &cobra.Command{ + Use: "general", + Short: "Inspect the other dynamic ingress-nginx information", + RunE: func(cmd *cobra.Command, args []string) error { + pod, err := cmd.Flags().GetString("pod") + if err != nil { + return err + } + + 
util.PrintError(general(flags, pod)) + return nil + }, + } + generalCmd.Flags().String("pod", "", "Query a particular ingress-nginx pod") + rootCmd.AddCommand(generalCmd) + + infoCmd := &cobra.Command{ + Use: "info", + Short: "Show information about the ingress-nginx service", + RunE: func(cmd *cobra.Command, args []string) error { + util.PrintError(info(flags)) + return nil + }, + } + rootCmd.AddCommand(infoCmd) + + backendsCmd := &cobra.Command{ + Use: "backends", + Short: "Inspect the dynamic backend information of an ingress-nginx instance", + RunE: func(cmd *cobra.Command, args []string) error { + pod, err := cmd.Flags().GetString("pod") + if err != nil { + return err + } + backend, err := cmd.Flags().GetString("backend") + if err != nil { + return err + } + onlyList, err := cmd.Flags().GetBool("list") + if err != nil { + return err + } + if onlyList && backend != "" { + return fmt.Errorf("--list and --backend cannot both be specified") + } + + util.PrintError(backends(flags, pod, backend, onlyList)) + return nil + }, + } + backendsCmd.Flags().String("pod", "", "Query a particular ingress-nginx pod") + backendsCmd.Flags().String("backend", "", "Output only the information for the given backend") + backendsCmd.Flags().Bool("list", false, "Output a newline-separated list of backend names") + rootCmd.AddCommand(backendsCmd) + + certsCmd := &cobra.Command{ + Use: "certs", + Short: "Output the certificate data stored in an ingress-nginx pod", + RunE: func(cmd *cobra.Command, args []string) error { + pod, err := cmd.Flags().GetString("pod") + if err != nil { + return err + } + host, err := cmd.Flags().GetString("host") + if err != nil { + return err + } + + util.PrintError(certs(flags, pod, host)) + return nil + }, + } + certsCmd.Flags().String("host", "", "Get the cert for this hostname") + certsCmd.Flags().String("pod", "", "Query a particular ingress-nginx pod") + cobra.MarkFlagRequired(certsCmd.Flags(), "host") + rootCmd.AddCommand(certsCmd) + + if err := 
rootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(1) + } +} + +func certs(flags *genericclioptions.ConfigFlags, pod string, host string) error { + command := []string{"/dbg", "certs", "get", host} + var out string + var err error + if pod != "" { + out, err = request.NamedPodExec(flags, pod, command) + } else { + out, err = request.IngressPodExec(flags, command) + } + if err != nil { + return err + } + + fmt.Print(out) + return nil +} + +func info(flags *genericclioptions.ConfigFlags) error { + service, err := request.GetIngressService(flags) + if err != nil { + return err + } + + fmt.Printf("Service cluster IP address: %v\n", service.Spec.ClusterIP) + fmt.Printf("LoadBalancer IP|CNAME: %v\n", service.Spec.LoadBalancerIP) + return nil +} + +func backends(flags *genericclioptions.ConfigFlags, pod string, backend string, onlyList bool) error { + var command []string + if onlyList { + command = []string{"/dbg", "backends", "list"} + } else if backend != "" { + command = []string{"/dbg", "backends", "get", backend} + } else { + command = []string{"/dbg", "backends", "all"} + } + + var out string + var err error + if pod != "" { + out, err = request.NamedPodExec(flags, pod, command) + } else { + out, err = request.IngressPodExec(flags, command) + } + if err != nil { + return err + } + + fmt.Print(out) + return nil +} + +func general(flags *genericclioptions.ConfigFlags, pod string) error { + var general string + var err error + if pod != "" { + general, err = request.NamedPodExec(flags, pod, []string{"/dbg", "general"}) + } else { + general, err = request.IngressPodExec(flags, []string{"/dbg", "general"}) + } + if err != nil { + return err + } + + fmt.Print(general) + return nil +} + +func ingresses(flags *genericclioptions.ConfigFlags, host string, allNamespaces bool) error { + var namespace string + if allNamespaces { + namespace = "" + } else { + namespace = util.GetNamespace(flags) + } + + ingresses, err := request.GetIngressDefinitions(flags, namespace) 
+ if err != nil { + return err + } + + rows := getIngressRows(&ingresses) + + if host != "" { + rowsWithHost := make([]ingressRow, 0) + for _, row := range rows { + if row.Host == host { + rowsWithHost = append(rowsWithHost, row) + } + } + rows = rowsWithHost + } + + printer := tabwriter.NewWriter(os.Stdout, 6, 4, 3, ' ', 0) + defer printer.Flush() + + if allNamespaces { + fmt.Fprintln(printer, "NAMESPACE\tINGRESS NAME\tHOST+PATH\tADDRESSES\tTLS\tSERVICE\tSERVICE PORT") + } else { + fmt.Fprintln(printer, "INGRESS NAME\tHOST+PATH\tADDRESSES\tTLS\tSERVICE\tSERVICE PORT") + } + + for _, row := range rows { + var tlsMsg string + if row.TLS { + tlsMsg = "YES" + } else { + tlsMsg = "NO" + } + if allNamespaces { + fmt.Fprintf(printer, "%v\t%v\t%v\t%v\t%v\t%v\t%v\t\n", row.Namespace, row.IngressName, row.Host+row.Path, row.Address, tlsMsg, row.ServiceName, row.ServicePort) + } else { + fmt.Fprintf(printer, "%v\t%v\t%v\t%v\t%v\t%v\t\n", row.IngressName, row.Host+row.Path, row.Address, tlsMsg, row.ServiceName, row.ServicePort) + } + } + + return nil +} + +func conf(flags *genericclioptions.ConfigFlags, host string, pod string) error { + var nginxConf string + var err error + if pod != "" { + nginxConf, err = request.NamedPodExec(flags, pod, []string{"/dbg", "conf"}) + } else { + nginxConf, err = request.IngressPodExec(flags, []string{"/dbg", "conf"}) + } + if err != nil { + return err + } + + if host != "" { + block, err := nginx.GetServerBlock(nginxConf, host) + if err != nil { + return err + } + + fmt.Println(strings.TrimRight(strings.Trim(block, " \n"), " \n\t")) + } else { + fmt.Print(nginxConf) + } + + return nil +} + +type ingressRow struct { + Namespace string + IngressName string + Host string + Path string + TLS bool + ServiceName string + ServicePort string + Address string +} + +func getIngressRows(ingresses *[]v1beta1.Ingress) []ingressRow { + rows := make([]ingressRow, 0) + + for _, ing := range *ingresses { + + address := "" + for _, lbIng := range 
ing.Status.LoadBalancer.Ingress { + if len(lbIng.IP) > 0 { + address = address + lbIng.IP + "," + } + if len(lbIng.Hostname) > 0 { + address = address + lbIng.Hostname + "," + } + } + if len(address) > 0 { + address = address[:len(address)-1] + } + + tlsHosts := make(map[string]struct{}) + for _, tls := range ing.Spec.TLS { + for _, host := range tls.Hosts { + tlsHosts[host] = struct{}{} + } + } + + defaultBackendService := "" + defaultBackendPort := "" + if ing.Spec.Backend != nil { + defaultBackendService = ing.Spec.Backend.ServiceName + defaultBackendPort = ing.Spec.Backend.ServicePort.String() + } + + // Handle catch-all ingress + if len(ing.Spec.Rules) == 0 && len(defaultBackendService) > 0 { + row := ingressRow{ + Namespace: ing.Namespace, + IngressName: ing.Name, + Host: "*", + ServiceName: defaultBackendService, + ServicePort: defaultBackendPort, + Address: address, + } + + rows = append(rows, row) + continue + } + + for _, rule := range ing.Spec.Rules { + _, hasTLS := tlsHosts[rule.Host] + + //Handle ingress with no paths + if rule.HTTP == nil { + row := ingressRow{ + Namespace: ing.Namespace, + IngressName: ing.Name, + Host: rule.Host, + Path: "", + TLS: hasTLS, + ServiceName: defaultBackendService, + ServicePort: defaultBackendPort, + Address: address, + } + rows = append(rows, row) + continue + } + + for _, path := range rule.HTTP.Paths { + row := ingressRow{ + Namespace: ing.Namespace, + IngressName: ing.Name, + Host: rule.Host, + Path: path.Path, + TLS: hasTLS, + ServiceName: path.Backend.ServiceName, + ServicePort: path.Backend.ServicePort.String(), + Address: address, + } + + rows = append(rows, row) + } + } + } + + return rows +} diff --git a/cmd/plugin/request/request.go b/cmd/plugin/request/request.go new file mode 100644 index 0000000000..4d1145ee8e --- /dev/null +++ b/cmd/plugin/request/request.go @@ -0,0 +1,209 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package request + +import ( + "bytes" + "fmt" + apiv1 "k8s.io/api/core/v1" + "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/client-go/kubernetes/scheme" + corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + extensions "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" + "k8s.io/client-go/tools/remotecommand" + "k8s.io/ingress-nginx/cmd/plugin/util" +) + +const ( + ingressPodName = "nginx-ingress-controller" + ingressServiceName = "ingress-nginx" +) + +// NamedPodExec finds a pod with the given name, executes a command inside it, and returns stdout +func NamedPodExec(flags *genericclioptions.ConfigFlags, podName string, cmd []string) (string, error) { + allPods, err := getPods(flags) + if err != nil { + return "", err + } + + for _, pod := range allPods { + if pod.Name == podName { + return podExec(flags, &pod, cmd) + } + } + + return "", fmt.Errorf("Pod %v not found in namespace %v", podName, util.GetNamespace(flags)) +} + +// IngressPodExec finds an ingress-nginx pod in the given namespace, executes a command inside it, and returns stdout +func IngressPodExec(flags *genericclioptions.ConfigFlags, cmd []string) (string, error) { + ings, err := getIngressPods(flags) + if err != nil { + return "", err + } + + if len(ings) == 0 { + return "", fmt.Errorf("No ingress-nginx pods found in namespace %v", util.GetNamespace(flags)) + } + + return 
podExec(flags, &ings[0], cmd) +} + +func podExec(flags *genericclioptions.ConfigFlags, pod *apiv1.Pod, cmd []string) (string, error) { + config, err := flags.ToRESTConfig() + if err != nil { + return "", err + } + + client, err := corev1.NewForConfig(config) + if err != nil { + return "", err + } + + namespace, _, err := flags.ToRawKubeConfigLoader().Namespace() + if err != nil { + return "", err + } + + restClient := client.RESTClient() + + req := restClient.Post(). + Resource("pods"). + Name(pod.Name). + Namespace(namespace). + SubResource("exec"). + Param("container", ingressPodName) + + req.VersionedParams(&apiv1.PodExecOptions{ + Container: ingressPodName, + Command: cmd, + Stdin: false, + Stdout: true, + Stderr: false, + TTY: false, + }, scheme.ParameterCodec) + + exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL()) + + if err != nil { + return "", err + } + + stdout := bytes.NewBuffer(make([]byte, 0)) + err = exec.Stream(remotecommand.StreamOptions{ + Stdout: stdout, + }) + + return stdout.String(), err +} + +func getIngressPods(flags *genericclioptions.ConfigFlags) ([]apiv1.Pod, error) { + pods, err := getPods(flags) + if err != nil { + return make([]apiv1.Pod, 0), err + } + + ingressPods := make([]apiv1.Pod, 0) + for _, pod := range pods { + if pod.Spec.Containers[0].Name == ingressPodName { + ingressPods = append(ingressPods, pod) + } + } + + return ingressPods, nil +} + +func getPods(flags *genericclioptions.ConfigFlags) ([]apiv1.Pod, error) { + namespace := util.GetNamespace(flags) + + rawConfig, err := flags.ToRESTConfig() + if err != nil { + return make([]apiv1.Pod, 0), err + } + + api, err := corev1.NewForConfig(rawConfig) + if err != nil { + return make([]apiv1.Pod, 0), err + } + + pods, err := api.Pods(namespace).List(metav1.ListOptions{}) + if err != nil { + return make([]apiv1.Pod, 0), err + } + + return pods.Items, nil +} + +// GetIngressDefinitions returns an array of Ingress resource definitions +func 
GetIngressDefinitions(flags *genericclioptions.ConfigFlags, namespace string) ([]v1beta1.Ingress, error) { + rawConfig, err := flags.ToRESTConfig() + if err != nil { + return make([]v1beta1.Ingress, 0), err + } + + api, err := extensions.NewForConfig(rawConfig) + if err != nil { + return make([]v1beta1.Ingress, 0), err + } + + pods, err := api.Ingresses(namespace).List(metav1.ListOptions{}) + if err != nil { + return make([]v1beta1.Ingress, 0), err + } + + return pods.Items, nil +} + +// GetIngressService finds and returns the ingress-nginx service definition +func GetIngressService(flags *genericclioptions.ConfigFlags) (apiv1.Service, error) { + services, err := getServices(flags) + if err != nil { + return apiv1.Service{}, err + } + + for _, svc := range services { + if svc.Name == ingressServiceName { + return svc, nil + } + } + + return apiv1.Service{}, fmt.Errorf("Could not find service %v in namespace %v", ingressServiceName, util.GetNamespace(flags)) +} + +func getServices(flags *genericclioptions.ConfigFlags) ([]apiv1.Service, error) { + namespace := util.GetNamespace(flags) + + rawConfig, err := flags.ToRESTConfig() + if err != nil { + return make([]apiv1.Service, 0), err + } + + api, err := corev1.NewForConfig(rawConfig) + if err != nil { + return make([]apiv1.Service, 0), err + } + + services, err := api.Services(namespace).List(metav1.ListOptions{}) + if err != nil { + return make([]apiv1.Service, 0), err + } + + return services.Items, nil + +} diff --git a/cmd/plugin/util/util.go b/cmd/plugin/util/util.go new file mode 100644 index 0000000000..3293fbf02e --- /dev/null +++ b/cmd/plugin/util/util.go @@ -0,0 +1,55 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + + apiv1 "k8s.io/api/core/v1" + "k8s.io/cli-runtime/pkg/genericclioptions" +) + +// PrintError receives an error value and prints it if it exists +func PrintError(e error) { + if e != nil { + fmt.Println(e) + } +} + +func printWithError(s string, e error) { + if e != nil { + fmt.Println(e) + } + fmt.Print(s) +} + +func printOrError(s string, e error) error { + if e != nil { + return e + } + fmt.Print(s) + return nil +} + +// GetNamespace takes a set of kubectl flag values and returns the namespace we should be operating in +func GetNamespace(flags *genericclioptions.ConfigFlags) string { + namespace, _, err := flags.ToRawKubeConfigLoader().Namespace() + if err != nil || len(namespace) == 0 { + namespace = apiv1.NamespaceDefault + } + return namespace +} diff --git a/deploy/mandatory.yaml b/deploy/mandatory.yaml index affedb648a..7884ceca91 100644 --- a/deploy/mandatory.yaml +++ b/deploy/mandatory.yaml @@ -211,7 +211,7 @@ spec: serviceAccountName: nginx-ingress-serviceaccount containers: - name: nginx-ingress-controller - image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.21.0 + image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.22.0 args: - /nginx-ingress-controller - --configmap=$(POD_NAMESPACE)/nginx-configuration @@ -251,7 +251,7 @@ spec: initialDelaySeconds: 10 periodSeconds: 10 successThreshold: 1 - timeoutSeconds: 1 + timeoutSeconds: 10 readinessProbe: failureThreshold: 3 httpGet: @@ -260,6 +260,6 @@ spec: scheme: HTTP periodSeconds: 10 successThreshold: 1 - timeoutSeconds: 1 + 
timeoutSeconds: 10 --- diff --git a/deploy/provider/aws/patch-configmap-l7.yaml b/deploy/provider/aws/patch-configmap-l7.yaml index c71eaab0a8..b1bcd2a971 100644 --- a/deploy/provider/aws/patch-configmap-l7.yaml +++ b/deploy/provider/aws/patch-configmap-l7.yaml @@ -8,6 +8,7 @@ metadata: app.kubernetes.io/part-of: ingress-nginx data: use-proxy-protocol: "false" - + use-forwarded-headers: "true" + proxy-real-ip-cidr: "0.0.0.0/0" # restrict this to the IP addresses of ELB --- diff --git a/deploy/with-rbac.yaml b/deploy/with-rbac.yaml index afc98d2565..3418f54d46 100644 --- a/deploy/with-rbac.yaml +++ b/deploy/with-rbac.yaml @@ -24,7 +24,7 @@ spec: serviceAccountName: nginx-ingress-serviceaccount containers: - name: nginx-ingress-controller - image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.21.0 + image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.22.0 args: - /nginx-ingress-controller - --configmap=$(POD_NAMESPACE)/nginx-configuration @@ -64,7 +64,7 @@ spec: initialDelaySeconds: 10 periodSeconds: 10 successThreshold: 1 - timeoutSeconds: 1 + timeoutSeconds: 10 readinessProbe: failureThreshold: 3 httpGet: @@ -73,7 +73,7 @@ spec: scheme: HTTP periodSeconds: 10 successThreshold: 1 - timeoutSeconds: 1 + timeoutSeconds: 10 --- diff --git a/docs/examples/auth/oauth-external-auth/README.md b/docs/examples/auth/oauth-external-auth/README.md index 4f1219392e..d6ad3fbed9 100644 --- a/docs/examples/auth/oauth-external-auth/README.md +++ b/docs/examples/auth/oauth-external-auth/README.md @@ -39,7 +39,7 @@ into a Kubernetes cluster and use it to protect the Kubernetes Dashboard using g 1. Install the kubernetes dashboard ```console -kubectl create -f https://raw.githubusercontent.com/kubernetes/kops/master/addons/kubernetes-dashboard/v1.10.0.yaml +kubectl create -f https://raw.githubusercontent.com/kubernetes/kops/master/addons/kubernetes-dashboard/v1.10.1.yaml ``` 2. 
Create a [custom Github OAuth application](https://github.com/settings/applications/new) diff --git a/docs/examples/grpc/README.md b/docs/examples/grpc/README.md index 0416f5e07f..06dcb5664e 100644 --- a/docs/examples/grpc/README.md +++ b/docs/examples/grpc/README.md @@ -17,7 +17,7 @@ nginx controller. for grpc support. 4. You have a backend application running a gRPC server and listening for TCP traffic. If you prefer, you can use the - [fortune-teller](https://github.com/kubernetes/ingress-nginx/images/grpc-fortune-teller) + [fortune-teller](https://github.com/kubernetes/ingress-nginx/tree/master/images/grpc-fortune-teller) application provided here as an example. ### Step 1: kubernetes `Deployment` @@ -30,7 +30,7 @@ This is a standard kubernetes deployment object. It is running a grpc service listening on port `50051`. The sample application -[fortune-teller-app](https://github.com/kubernetes/ingress-nginx/images/grpc-fortune-teller) +[fortune-teller-app](https://github.com/kubernetes/ingress-nginx/tree/master/images/grpc-fortune-teller) is a grpc server implemented in go. Here's the stripped-down implementation: ```go diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 7e6784c952..d5f7840e53 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -93,6 +93,99 @@ kube-system kube-dns ClusterIP 10.96.0.10 kube-system kubernetes-dashboard NodePort 10.103.128.17 80:30000/TCP 30m ``` +Use the `ingress-nginx` kubectl plugin + +Install [krew](https://github.com/GoogleContainerTools/krew), then run +```console +$ kubectl krew install --manifest https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/cmd/plugin/release/ingress-nginx.yaml +``` +to install the plugin. Then run +```console +$ kubectl ingress-nginx --help +``` +to make sure the plugin is properly installed and to get a list of commands. 
The plugin includes all of the commands present in the `/dbg` tool, plus a more detailed version of `kubectl get ingresses` available by running `kubectl ingress-nginx ingresses`. + +Use the `/dbg` Tool to Check Dynamic Configuration + +```console +$ kubectl exec -n nginx-ingress-controller-67956bf89d-fv58j /dbg +dbg is a tool for quickly inspecting the state of the nginx instance + +Usage: + dbg [command] + +Available Commands: + backends Inspect the dynamically-loaded backends information + conf Dump the contents of /etc/nginx/nginx.conf + general Output the general dynamic lua state + help Help about any command + +Flags: + -h, --help help for dbg + +Use "dbg [command] --help" for more information about a command. + +``` + +```console +$ kubectl exec -n nginx-ingress-controller-67956bf89d-fv58j /dbg backends +Inspect the dynamically-loaded backends information. + +Usage: + dbg backends [command] + +Available Commands: + all Output the all dynamic backend information as a JSON array + get Output the backend information only for the backend that has this name + list Output a newline-separated list of the backend names + +Flags: + -h, --help help for backends + +Use "dbg backends [command] --help" for more information about a command. +``` + +```console +$ kubectl exec -n nginx-ingress-controller-67956bf89d-fv58j /dbg backends list +coffee-svc-80 +tea-svc-80 +upstream-default-backend +``` + +```console +$ kubectl exec -n nginx-ingress-controller-67956bf89d-fv58j /dbg backends get coffee-svc-80 +{ + "endpoints": [ + { + "address": "10.1.1.112", + "port": "8080" + }, + { + "address": "10.1.1.119", + "port": "8080" + }, + { + "address": "10.1.1.121", + "port": "8080" + } + ], + "load-balance": "ewma", + "name": "coffee-svc-80", + "noServer": false, + "port": 0, + "secureCACert": { + "caFilename": "", + "pemSha": "", + "secret": "" + }, + "service": { + "metadata": { + "creationTimestamp": null + }, + "spec": { +....
+``` + ## Debug Logging Using the flag `--v=XX` it is possible to increase the level of logging. This is performed by editing diff --git a/docs/user-guide/cli-arguments.md b/docs/user-guide/cli-arguments.md index a593fd4146..4ec5cd3f6e 100644 --- a/docs/user-guide/cli-arguments.md +++ b/docs/user-guide/cli-arguments.md @@ -34,7 +34,6 @@ They are set in the container spec of the `nginx-ingress-controller` Deployment | `--report-node-internal-ip-address` | Set the load-balancer status of Ingress objects to internal Node addresses instead of external. Requires the update-status parameter. | | `--sort-backends` | Sort servers inside NGINX upstreams. | | `--ssl-passthrough-proxy-port int` | Port to use internally for SSL Passthrough. (default 442) | -| `--status-port int` | Port to use for exposing NGINX status pages. (default 18080) | | `--stderrthreshold severity` | logs at or above this threshold go to stderr (default 2) | | `--sync-period duration` | Period at which the controller forces the repopulation of its local object stores. Disabled by default. | | `--sync-rate-limit float32` | Define the sync frequency upper limit (default 0.3) | @@ -46,4 +45,4 @@ They are set in the container spec of the `nginx-ingress-controller` Deployment | `--version` | Show release information about the NGINX Ingress controller and exit. | | `--vmodule moduleSpec` | comma-separated list of pattern=N settings for file-filtered logging | | `--watch-namespace string` | Namespace the controller watches for updates to Kubernetes objects. This includes Ingresses, Services and all configuration resources. All namespaces are watched if this parameter is left empty. | -| `--disable-catch-all` | Disable support for catch-all Ingresses. | +| `--disable-catch-all` | Disable support for catch-all Ingresses. 
| \ No newline at end of file diff --git a/docs/user-guide/ingress-path-matching.md b/docs/user-guide/ingress-path-matching.md index 80a23b0222..6aa1d9c3d2 100644 --- a/docs/user-guide/ingress-path-matching.md +++ b/docs/user-guide/ingress-path-matching.md @@ -71,13 +71,13 @@ kind: Ingress metadata: name: test-ingress-2 annotations: - nginx.ingress.kubernetes.io/rewrite-target: / + nginx.ingress.kubernetes.io/rewrite-target: /$1 spec: rules: - host: test.com http: paths: - - path: /foo/bar/.+ + - path: /foo/bar/(.+) backend: serviceName: service3 servicePort: 80 diff --git a/docs/user-guide/nginx-configuration/annotations.md b/docs/user-guide/nginx-configuration/annotations.md index c77dec82b9..0093b9d290 100644 --- a/docs/user-guide/nginx-configuration/annotations.md +++ b/docs/user-guide/nginx-configuration/annotations.md @@ -30,6 +30,7 @@ You can add these Kubernetes annotations to specific Ingress objects to customiz |[nginx.ingress.kubernetes.io/backend-protocol](#backend-protocol)|string|HTTP,HTTPS,GRPC,GRPCS,AJP| |[nginx.ingress.kubernetes.io/canary](#canary)|"true" or "false"| |[nginx.ingress.kubernetes.io/canary-by-header](#canary)|string| +|[nginx.ingress.kubernetes.io/canary-by-header-value](#canary)|string |[nginx.ingress.kubernetes.io/canary-by-cookie](#canary)|string| |[nginx.ingress.kubernetes.io/canary-weight](#canary)|number| |[nginx.ingress.kubernetes.io/client-body-buffer-size](#client-body-buffer-size)|string| @@ -63,6 +64,7 @@ You can add these Kubernetes annotations to specific Ingress objects to customiz |[nginx.ingress.kubernetes.io/proxy-redirect-to](#proxy-redirect)|string| |[nginx.ingress.kubernetes.io/enable-rewrite-log](#enable-rewrite-log)|"true" or "false"| |[nginx.ingress.kubernetes.io/rewrite-target](#rewrite)|URI| +|[nginx.ingress.kubernetes.io/satisfy](#satisfy)|string| |[nginx.ingress.kubernetes.io/secure-verify-ca-secret](#secure-backends)|string| |[nginx.ingress.kubernetes.io/server-alias](#server-alias)|string| 
|[nginx.ingress.kubernetes.io/server-snippet](#server-snippet)|string| @@ -78,6 +80,7 @@ You can add these Kubernetes annotations to specific Ingress objects to customiz |[nginx.ingress.kubernetes.io/upstream-vhost](#custom-nginx-upstream-vhost)|string| |[nginx.ingress.kubernetes.io/whitelist-source-range](#whitelist-source-range)|CIDR| |[nginx.ingress.kubernetes.io/proxy-buffering](#proxy-buffering)|string| +|[nginx.ingress.kubernetes.io/proxy-buffers-number](#proxy-buffers-number)|number| |[nginx.ingress.kubernetes.io/proxy-buffer-size](#proxy-buffer-size)|string| |[nginx.ingress.kubernetes.io/ssl-ciphers](#ssl-ciphers)|string| |[nginx.ingress.kubernetes.io/connection-proxy-header](#connection-proxy-header)|string| @@ -106,6 +109,8 @@ In some cases, you may want to "canary" a new set of changes by sending a small * `nginx.ingress.kubernetes.io/canary-by-header`: The header to use for notifying the Ingress to route the request to the service specified in the Canary Ingress. When the request header is set to `always`, it will be routed to the canary. When the header is set to `never`, it will never be routed to the canary. For any other value, the header will be ignored and the request compared against the other canary rules by precedence. +* `nginx.ingress.kubernetes.io/canary-by-header-value`: The header value to match for notifying the Ingress to route the request to the service specified in the Canary Ingress. When the request header is set to this value, it will be routed to the canary. For any other header value, the header will be ignored and the request compared against the other canary rules by precedence. This annotation has to be used together with `nginx.ingress.kubernetes.io/canary-by-header`. The annotation is an extension of the `nginx.ingress.kubernetes.io/canary-by-header` to allow customizing the header value instead of using hardcoded values. It doesn't have any effect if the `nginx.ingress.kubernetes.io/canary-by-header` annotation is not defined.
+ * `nginx.ingress.kubernetes.io/canary-by-cookie`: The cookie to use for notifying the Ingress to route the request to the service specified in the Canary Ingress. When the cookie value is set to `always`, it will be routed to the canary. When the cookie is set to `never`, it will never be routed to the canary. For any other value, the cookie will be ingored and the request compared against the other canary rules by precedence. * `nginx.ingress.kubernetes.io/canary-weight`: The integer based (0 - 100) percent of random requests that should be routed to the service specified in the canary Ingress. A weight of 0 implies that no requests will be sent to the service in the Canary ingress by this canary rule. A weight of 100 means implies all requests will be sent to the alternative service specified in the Ingress. @@ -243,23 +248,23 @@ nginx.ingress.kubernetes.io/configuration-snippet: | more_set_headers "Request-Id: $req_id"; ``` -### Default Backend - -The ingress controller requires a [default backend](../default-backend.md). -This service handles the response when the service in the Ingress rule does not have endpoints. -This is a global configuration for the ingress controller. In some cases could be required to return a custom content or format. In this scenario we can use the annotation `nginx.ingress.kubernetes.io/default-backend: ` to specify a custom default backend. - ### Custom HTTP Errors -Like the [`custom-http-errors`](./configmap.md#custom-http-errors) value in the ConfigMap, this annotation will set NGINX `proxy-intercept-errors`, but only for the NGINX location associated with this ingress. +Like the [`custom-http-errors`](./configmap.md#custom-http-errors) value in the ConfigMap, this annotation will set NGINX `proxy-intercept-errors`, but only for the NGINX location associated with this ingress. 
If a [default backend annotation](#default-backend) is specified on the ingress, the errors will be routed to that annotation's default backend service (instead of the global default backend). Different ingresses can specify different sets of error codes. Even if multiple ingress objects share the same hostname, this annotation can be used to intercept different error codes for each ingress (for example, different error codes to be intercepted for different paths on the same hostname, if each path is on a different ingress). If `custom-http-errors` is also specified globally, the error values specified in this annotation will override the global value for the given ingress' hostname and path. Example usage: ``` -custom-http-errors: "404,415" +nginx.ingress.kubernetes.io/custom-http-errors: "404,415" ``` +### Default Backend + +This annotation is of the form `nginx.ingress.kubernetes.io/default-backend: ` to specify a custom default backend. This `` is a reference to a service inside of the same namespace in which you are applying this annotation. This annotation overrides the global default backend. + +This service will handle the response when the service in the Ingress rule does not have active endpoints. It will also handle the error responses if both this annotation and the [custom-http-errors annotation](#custom-http-errors) are set. + ### Enable CORS To enable Cross-Origin Resource Sharing (CORS) in an Ingress rule, add the annotation @@ -543,6 +548,16 @@ To use custom values in an Ingress rule define these annotation: nginx.ingress.kubernetes.io/proxy-buffering: "on" ``` +### Proxy buffers Number + +Sets the number of the buffers in [`proxy_buffers`](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffers) used for reading the first part of the response received from the proxied server.
+By default proxy buffers number is set as 4 + +To configure this setting globally, set `proxy-buffers-number` in [NGINX ConfigMap](./configmap.md#proxy-buffers-number). To use custom values in an Ingress rule, define this annotation: +```yaml +nginx.ingress.kubernetes.io/proxy-buffers-number: "4" +``` + ### Proxy buffer size Sets the size of the buffer [`proxy_buffer_size`](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffer_size) used for reading the first part of the response received from the proxied server. @@ -724,7 +739,7 @@ an ip address to `nginx.ingress.kubernetes.io/influxdb-host`. If you deploy Infl ### Backend Protocol -Using `backend-protocol` annotations is possible to indicate how NGINX should communicate with the backend service. +Using `backend-protocol` annotations is possible to indicate how NGINX should communicate with the backend service. (Replaces `secure-backends` in older versions) Valid Values: HTTP, HTTPS, GRPC, GRPCS and AJP By default NGINX uses `HTTP`. @@ -757,3 +772,11 @@ When this annotation is set to `true`, the case insensitive regular expression [ Additionally, if the [`rewrite-target` annotation](#rewrite) is used on any Ingress for a given host, then the case insensitive regular expression [location modifier](https://nginx.org/en/docs/http/ngx_http_core_module.html#location) will be enforced on ALL paths for a given host regardless of what Ingress they are defined on. Please read about [ingress path matching](../ingress-path-matching.md) before using this modifier. + +### Satisfy + +By default, a request would need to satisfy all authentication requirements in order to be allowed. By using this annotation, requests that satisfy either any or all authentication requirements are allowed, based on the configuration value. 
+ +```yaml +nginx.ingress.kubernetes.io/satisfy: "any" +``` diff --git a/docs/user-guide/nginx-configuration/configmap.md b/docs/user-guide/nginx-configuration/configmap.md index 36aaf2ce16..cd0e8035df 100644 --- a/docs/user-guide/nginx-configuration/configmap.md +++ b/docs/user-guide/nginx-configuration/configmap.md @@ -30,7 +30,9 @@ The following table shows a configuration option's name, type, and the default v |[add-headers](#add-headers)|string|""| |[allow-backend-server-header](#allow-backend-server-header)|bool|"false"| |[hide-headers](#hide-headers)|string array|empty| +|[access-log-params](#access-log-params)|string|""| |[access-log-path](#access-log-path)|string|"/var/log/nginx/access.log"| +|[enable-access-log-for-default-backend](#enable-access-log-for-default-backend)|bool|"false"| |[error-log-path](#error-log-path)|string|"/var/log/nginx/error.log"| |[enable-dynamic-tls-records](#enable-dynamic-tls-records)|bool|"true"| |[enable-modsecurity](#enable-modsecurity)|bool|"false"| @@ -131,6 +133,7 @@ The following table shows a configuration option's name, type, and the default v |[proxy-connect-timeout](#proxy-connect-timeout)|int|5| |[proxy-read-timeout](#proxy-read-timeout)|int|60| |[proxy-send-timeout](#proxy-send-timeout)|int|60| +|[proxy-buffers-number](#proxy-buffers-number)|int|4| |[proxy-buffer-size](#proxy-buffer-size)|string|"4k"| |[proxy-cookie-path](#proxy-cookie-path)|string|"off"| |[proxy-cookie-domain](#proxy-cookie-domain)|string|"off"| @@ -169,12 +172,23 @@ _**default:**_ empty _References:_ [http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_hide_header](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_hide_header) +## access-log-params + +Additional params for access_log. For example, buffer=16k, gzip, flush=1m + +_References:_ +[http://nginx.org/en/docs/http/ngx_http_log_module.html#access_log](http://nginx.org/en/docs/http/ngx_http_log_module.html#access_log) + ## access-log-path Access log path. 
Goes to `/var/log/nginx/access.log` by default. __Note:__ the file `/var/log/nginx/access.log` is a symlink to `/dev/stdout` +## enable-access-log-for-default-backend + +Enables logging access to default backend. _**default:**_ is disabled. + ## error-log-path Error log path. Goes to `/var/log/nginx/error.log` by default. @@ -754,6 +768,10 @@ Sets the timeout in seconds for [reading a response from the proxied server](htt Sets the timeout in seconds for [transmitting a request to the proxied server](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_send_timeout). The timeout is set only between two successive write operations, not for the transmission of the whole request. +## proxy-buffers-number + +Sets the number of the buffer used for [reading the first part of the response](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffers) received from the proxied server. This part usually contains a small response header. + ## proxy-buffer-size Sets the size of the buffer used for [reading the first part of the response](http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffer_size) received from the proxied server. This part usually contains a small response header. diff --git a/docs/user-guide/third-party-addons/opentracing.md b/docs/user-guide/third-party-addons/opentracing.md index c86d7b578e..dc530c2ae1 100644 --- a/docs/user-guide/third-party-addons/opentracing.md +++ b/docs/user-guide/third-party-addons/opentracing.md @@ -1,13 +1,13 @@ # OpenTracing -Enables requests served by nginx for distributed tracing via The OpenTracing Project. +Enables requests served by NGINX for distributed tracing via The OpenTracing Project. Using the third party module [opentracing-contrib/nginx-opentracing](https://github.com/opentracing-contrib/nginx-opentracing) the NGINX ingress controller can configure NGINX to enable [OpenTracing](http://opentracing.io) instrumentation. By default this feature is disabled. 
## Usage -To enable the instrumentation we must enable opentracing in the configuration configmap: +To enable the instrumentation we must enable OpenTracing in the configuration ConfigMap: ``` data: enable-opentracing: "true" @@ -17,11 +17,16 @@ We must also set the host to use when uploading traces: ``` zipkin-collector-host: zipkin.default.svc.cluster.local -jaeger-collector-host: jaeger-collector.default.svc.cluster.local +jaeger-collector-host: jaeger-agent.default.svc.cluster.local +datadog-collector-host: datadog-agent.default.svc.cluster.local ``` +NOTE: While the option is called `jaeger-collector-host`, you will need to point this to a `jaeger-agent`, and not the `jaeger-collector` component. -Next you will need to deploy a distributed tracing system which uses OpenTracing. Both [Zipkin](https://github.com/openzipkin/zipkin) and -[Jaeger](https://github.com/jaegertracing/jaeger) have been tested. +Next you will need to deploy a distributed tracing system which uses OpenTracing. +[Zipkin](https://github.com/openzipkin/zipkin) and +[Jaeger](https://github.com/jaegertracing/jaeger) and +[Datadog](https://github.com/DataDog/dd-opentracing-cpp) +have been tested. Other optional configuration options: ``` @@ -46,8 +51,19 @@ jaeger-sampler-type # specifies the argument to be passed to the sampler constructor, Default: 1 jaeger-sampler-param + +# specifies the port to use when uploading traces, Default 8126 +datadog-collector-port + +# specifies the service name to use for any traces created, Default: nginx +datadog-service-name + +# specifies the operation name to use for any traces collected, Default: nginx.handle +datadog-operation-name-override ``` +All these options (including host) allow environment variables, such as `$HOSTNAME` or `$HOST_IP`. 
In the case of Jaeger, if you have a Jaeger agent running on each machine in your cluster, you can use something like `$HOST_IP` (which can be 'mounted' with the `status.hostIP` fieldpath, as described [here](https://kubernetes.io/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/#capabilities-of-the-downward-api)) to make sure traces will be sent to the local agent. + ## Examples The following examples show how to deploy and test different distributed tracing systems. These example can be performed @@ -56,14 +72,14 @@ using Minikube. ### Zipkin In the [rnburn/zipkin-date-server](https://github.com/rnburn/zipkin-date-server) -github repository is an example of a dockerized date service. To install the example and zipkin collector run: +GitHub repository is an example of a dockerized date service. To install the example and Zipkin collector run: ``` kubectl create -f https://raw.githubusercontent.com/rnburn/zipkin-date-server/master/kubernetes/zipkin.yaml kubectl create -f https://raw.githubusercontent.com/rnburn/zipkin-date-server/master/kubernetes/deployment.yaml ``` -Also we need to configure the NGINX controller configmap with the required values: +Also we need to configure the NGINX controller ConfigMap with the required values: ``` $ echo ' @@ -78,22 +94,22 @@ metadata: ' | kubectl replace -f - ``` -In the zipkin interface we can see the details: +In the Zipkin interface we can see the details: ![zipkin screenshot](../../images/zipkin-demo.png "zipkin collector screenshot") ### Jaeger -1. Enable Ingress addon in minikube: +1. Enable Ingress addon in Minikube: ``` $ minikube addons enable ingress ``` -2. Add minikube IP to /etc/hosts: +2. Add Minikube IP to /etc/hosts: ``` $ echo "$(minikube ip) example.com" | sudo tee -a /etc/hosts ``` -3. Apply a Basic Service and Ingress Resource: +3. 
Apply a basic Service and Ingress Resource: ``` # Create Echoheaders Deployment $ kubectl run echoheaders --image=k8s.gcr.io/echoserver:1.4 --replicas=1 --port=8080 @@ -126,7 +142,7 @@ In the zipkin interface we can see the details: kind: ConfigMap data: enable-opentracing: "true" - jaeger-collector-host: jaeger-collector.default.svc.cluster.local + jaeger-collector-host: jaeger-agent.default.svc.cluster.local metadata: name: nginx-configuration namespace: kube-system @@ -178,5 +194,5 @@ In the zipkin interface we can see the details: http://192.168.99.100:30183 ``` - In the jaeger interface we can see the details: + In the Jaeger interface we can see the details: ![jaeger screenshot](../../images/jaeger-demo.png "jaeger collector screenshot") diff --git a/images/e2e/Dockerfile b/images/e2e/Dockerfile index 13accbf592..f14c6949cd 100644 --- a/images/e2e/Dockerfile +++ b/images/e2e/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-FROM quay.io/kubernetes-ingress-controller/nginx-amd64:0.75 +FROM quay.io/kubernetes-ingress-controller/nginx-amd64:0.79 RUN clean-install \ g++ \ @@ -25,9 +25,9 @@ RUN clean-install \ python \ pkg-config -ENV GOLANG_VERSION 1.11.4 +ENV GOLANG_VERSION 1.11.5 ENV GO_ARCH linux-amd64 -ENV GOLANG_SHA fb26c30e6a04ad937bbc657a1b5bba92f80096af1e8ee6da6430c045a8db3a5b +ENV GOLANG_SHA ff54aafedff961eb94792487e827515da683d61a5f9482f668008832631e5d25 RUN set -eux; \ url="https://golang.org/dl/go${GOLANG_VERSION}.${GO_ARCH}.tar.gz"; \ @@ -62,5 +62,5 @@ RUN luarocks install luacheck \ RUN go get github.com/onsi/ginkgo/ginkgo \ && go get golang.org/x/lint/golint -RUN curl -Lo /usr/local/bin/kubectl https://storage.googleapis.com/kubernetes-release/release/v1.12.4/bin/linux/amd64/kubectl \ +RUN curl -Lo /usr/local/bin/kubectl https://storage.googleapis.com/kubernetes-release/release/v1.13.3/bin/linux/amd64/kubectl \ && chmod +x /usr/local/bin/kubectl diff --git a/images/nginx/Makefile b/images/nginx/Makefile index 6822fa6a0d..00aafca3b1 100644 --- a/images/nginx/Makefile +++ b/images/nginx/Makefile @@ -13,7 +13,7 @@ # limitations under the License. 
# 0.0.0 shouldn't clobber any released builds -TAG ?= 0.75 +TAG ?= 0.79 REGISTRY ?= quay.io/kubernetes-ingress-controller ARCH ?= $(shell go env GOARCH) DOCKER ?= docker diff --git a/images/nginx/README.md b/images/nginx/README.md index ca0d889f6a..788b014a31 100644 --- a/images/nginx/README.md +++ b/images/nginx/README.md @@ -12,6 +12,7 @@ This custom nginx image contains: - [nginx-opentracing](https://github.com/opentracing-contrib/nginx-opentracing) - [opentracing-cpp](https://github.com/opentracing/opentracing-cpp) - [zipkin-cpp-opentracing](https://github.com/rnburn/zipkin-cpp-opentracing) +- [dd-opentracing-cpp](https://github.com/DataDog/dd-opentracing-cpp) - [ModSecurity-nginx](https://github.com/SpiderLabs/ModSecurity-nginx) (only supported in x86_64) - [brotli](https://github.com/google/brotli) - [geoip2](https://github.com/leev/ngx_http_geoip2_module) diff --git a/images/nginx/rootfs/build.sh b/images/nginx/rootfs/build.sh index 1deb2b4922..8bfdeb6fc1 100755 --- a/images/nginx/rootfs/build.sh +++ b/images/nginx/rootfs/build.sh @@ -25,18 +25,21 @@ export SETMISC_VERSION=0.32 export MORE_HEADERS_VERSION=0.33 export NGINX_DIGEST_AUTH=274490cec649e7300fea97fed13d84e596bbc0ce export NGINX_SUBSTITUTIONS=bc58cb11844bc42735bbaef7085ea86ace46d05b -export NGINX_OPENTRACING_VERSION=ea9994d7135be5ad2e3009d0f270e063b1fb3b21 -export OPENTRACING_CPP_VERSION=1.5.0 +export NGINX_OPENTRACING_VERSION=0.8.0 +export OPENTRACING_CPP_VERSION=1.5.1 export ZIPKIN_CPP_VERSION=0.5.2 -export JAEGER_VERSION=ba0fa3fa6dbb01995d996f988a897e272100bf95 +export JAEGER_VERSION=cdfaf5bb25ff5f8ec179fd548e6c7c2ade9a6a09 +export MSGPACK_VERSION=3.1.1 +export DATADOG_CPP_VERSION=0.4.2 +export LUA_BRIDGE_TRACER_VERSION=0.1.0 export MODSECURITY_VERSION=fc061a57a8b0abda79b17cbe103d78db803fa575 -export LUA_NGX_VERSION=1c72f57ce87d4355d546a97c2bd8f5123a70db5c -export LUA_STREAM_NGX_VERSION=0.0.6rc2 +export 
LUA_NGX_VERSION=fd90f4e8252e9d06419317fdf525b55c65e15a50 +export LUA_STREAM_NGX_VERSION=0.0.6rc5 export LUA_UPSTREAM_VERSION=0.07 export NGINX_INFLUXDB_VERSION=0e2cb6cbf850a29c81e44be9e33d9a15d45c50e8 export GEOIP2_VERSION=3.2 export NGINX_AJP_VERSION=bf6cd93f2098b59260de8d494f0f4b1f11a84627 -export LUAJIT_VERSION=520d53a87dd44c637dddb6de313204211c2b212b +export LUAJIT_VERSION=0e646b54e1368acb2e39d89014ae649207f4d0a0 export BUILD_PATH=/tmp/build @@ -48,6 +51,8 @@ get_src() url="$2" f=$(basename "$url") + echo "Downloading $url" + curl -sSL "$url" -o "$f" echo "$hash $f" | sha256sum -c - || exit 10 tar xzf "$f" @@ -144,10 +149,10 @@ get_src ede0ad490cb9dd69da348bdea2a60a4c45284c9777b2f13fa48394b6b8e7671c \ get_src 618551948ab14cac51d6e4ad00452312c7b09938f59ebff4f93875013be31f2d \ "https://github.com/yaoweibin/ngx_http_substitutions_filter_module/archive/$NGINX_SUBSTITUTIONS.tar.gz" -get_src 343b4293ca0d4afa55bf1ab54c866766043b2585b6ce81467d3d3e25987fc186 \ - "https://github.com/opentracing-contrib/nginx-opentracing/archive/$NGINX_OPENTRACING_VERSION.tar.gz" +get_src b2159297814d5df153cf45f355bcd8ffdb71f2468e8149ad549d4f9c0cdc81ad \ + "https://github.com/opentracing-contrib/nginx-opentracing/archive/v$NGINX_OPENTRACING_VERSION.tar.gz" -get_src 4455ca507936bc4b658ded10a90d8ebbbd61c58f06207be565a4ffdc885687b5 \ +get_src 015c4187f7a6426a2b5196f0ccd982aa87f010cf61f507ae3ce5c90523f92301 \ "https://github.com/opentracing/opentracing-cpp/archive/v$OPENTRACING_CPP_VERSION.tar.gz" get_src 30affaf0f3a84193f7127cc0135da91773ce45d902414082273dae78914f73df \ @@ -156,26 +161,35 @@ get_src 30affaf0f3a84193f7127cc0135da91773ce45d902414082273dae78914f73df \ get_src 073deba39f74eff81da917907465e1343c89b335244349d3d3b4ae9331de86f2 \ "https://github.com/SpiderLabs/ModSecurity-nginx/archive/$MODSECURITY_VERSION.tar.gz" -get_src b68286966f292fb552511b71bd8bc11af8f12c8aa760372d1437ac8760cb2f25 \ +get_src 
3183450d897baa9309347c8617edc0c97c5b29ffc32bd2d12f498edf2dcbeffa \ "https://github.com/jaegertracing/jaeger-client-cpp/archive/$JAEGER_VERSION.tar.gz" -get_src 6c8a2792222f6bfad927840bf64cb890466fcca703a0133cbde0e5b808461279 \ +get_src bda49f996a73d2c6080ff0523e7b535917cd28c8a79c3a5da54fc29332d61d1e \ + "https://github.com/msgpack/msgpack-c/archive/cpp-$MSGPACK_VERSION.tar.gz" + +get_src a3d1c03e7af570fa64c01df259e6e9bb78637a6bd9c65c6bf7e8703e466dc22f \ + "https://github.com/DataDog/dd-opentracing-cpp/archive/v$DATADOG_CPP_VERSION.tar.gz" + +get_src c29183001e3ab48299deecd02fb84b799b6627817c9baa66e4b342ac81dd6b40\ + "https://github.com/opentracing/lua-bridge-tracer/archive/v$LUA_BRIDGE_TRACER_VERSION.tar.gz" + +get_src 8ff5b18f4ff75ecdb852f50ce2069213d36285fa5f584c28e03ff978fe62d99a \ "https://github.com/openresty/lua-nginx-module/archive/$LUA_NGX_VERSION.tar.gz" -get_src 5420dbf59bac52cef8021658d7eae1667a2bd14dda23602c985cae2604de77dd \ +get_src 4f3f6fa0f2b89e0f83b6881b25ed190fd5bc7d38c1db338526664c500f0dedc6 \ "https://github.com/openresty/stream-lua-nginx-module/archive/v$LUA_STREAM_NGX_VERSION.tar.gz" get_src 2a69815e4ae01aa8b170941a8e1a10b6f6a9aab699dee485d58f021dd933829a \ "https://github.com/openresty/lua-upstream-nginx-module/archive/v$LUA_UPSTREAM_VERSION.tar.gz" -get_src 2349dd0b7ee37680306ee76bc4b6bf5c7509a4a4be16d246d9bbff44f564e4a0 \ - "https://github.com/openresty/lua-resty-lrucache/archive/v0.08.tar.gz" +get_src 342709c8e55e9901a91e90cb83153ff588d88be27c4370954b5a2e470e53d26f \ + "https://github.com/openresty/lua-resty-lrucache/archive/v0.09rc1.tar.gz" -get_src bc9a00f4dd6dd3928c6e878dc84fa7a1073d5a65900cd77a5c1c7ce2d863b22a \ - "https://github.com/openresty/lua-resty-core/archive/v0.1.16rc3.tar.gz" +get_src f3bf9a35c66b00594d13b9e22c2e8f07fdaeedc6e790e2ca4675a886d5b5e4da \ + "https://github.com/openresty/lua-resty-core/archive/v0.1.16rc4.tar.gz" -get_src 
eaf84f58b43289c1c3e0442ada9ed40406357f203adc96e2091638080cb8d361 \ - "https://github.com/openresty/lua-resty-lock/archive/v0.07.tar.gz" +get_src 517db9add320250b770f2daac83a49e38e6131611f2daa5ff05c69d5705e9746 \ + "https://github.com/openresty/lua-resty-lock/archive/v0.08rc1.tar.gz" get_src 3917d506e2d692088f7b4035c589cc32634de4ea66e40fc51259fbae43c9258d \ "https://github.com/hamishforbes/lua-resty-iputils/archive/v0.3.0.tar.gz" @@ -189,13 +203,13 @@ get_src 4aca34f324d543754968359672dcf5f856234574ee4da360ce02c778d244572a \ get_src 095615fe94e64615c4a27f4f4475b91c047cf8d10bc2dbde8d5ba6aa625fc5ab \ "https://github.com/openresty/lua-resty-string/archive/v0.11.tar.gz" -get_src a77bf0d7cf6a9ba017d0dc973b1a58f13e48242dd3849c5e99c07d250667c44c \ - "https://github.com/openresty/lua-resty-balancer/archive/v0.02rc4.tar.gz" +get_src 89cedd6466801bfef20499689ebb34ecf17a2e60a34cd06e13c0204ea1775588 \ + "https://github.com/openresty/lua-resty-balancer/archive/v0.02rc5.tar.gz" get_src d81b33129c6fb5203b571fa4d8394823bf473d8872c0357a1d0f14420b1483bd \ "https://github.com/cloudflare/lua-resty-cookie/archive/v0.1.0.tar.gz" -get_src d04df883adb86c96a8e0fe6c404851b9c776840dbb524419c06ae3fac42c4e64 \ +get_src 69914f80665a1c26f22c66768ee2467689c615a5d3e255b3e99a1ef65c769b3d \ "https://github.com/openresty/luajit2/archive/$LUAJIT_VERSION.tar.gz" get_src c673fcee37c1c4794f921b6710b09e8a0e1e58117aa788f798507d033f737192 \ @@ -238,13 +252,13 @@ fi cd "$BUILD_PATH" luarocks install lrexlib-pcre 2.7.2-1 PCRE_LIBDIR=${PCRE_DIR} -cd "$BUILD_PATH/lua-resty-core-0.1.16rc3" +cd "$BUILD_PATH/lua-resty-core-0.1.16rc4" make install -cd "$BUILD_PATH/lua-resty-lrucache-0.08" +cd "$BUILD_PATH/lua-resty-lrucache-0.09rc1" make install -cd "$BUILD_PATH/lua-resty-lock-0.07" +cd "$BUILD_PATH/lua-resty-lock-0.08rc1" make install cd "$BUILD_PATH/lua-resty-iputils-0.3.0" @@ -259,7 +273,7 @@ make install cd "$BUILD_PATH/lua-resty-string-0.11" make 
install -cd "$BUILD_PATH/lua-resty-balancer-0.02rc4" +cd "$BUILD_PATH/lua-resty-balancer-0.02rc5" make all make install @@ -352,6 +366,39 @@ cmake -DCMAKE_BUILD_TYPE=Release \ make make install +# build msgpack lib +cd "$BUILD_PATH/msgpack-c-cpp-$MSGPACK_VERSION" + +mkdir .build +cd .build +cmake -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_CXX_FLAGS="-fPIC" \ + -DBUILD_SHARED_LIBS=OFF \ + -DBUILD_TESTING=OFF \ + -DBUILD_MOCKTRACER=OFF \ + .. + +make +make install + +# build datadog lib +cd "$BUILD_PATH/dd-opentracing-cpp-$DATADOG_CPP_VERSION" + +mkdir .build +cd .build +cmake .. + +make +make install + +# build Lua bridge tracer +cd "$BUILD_PATH/lua-bridge-tracer-$LUA_BRIDGE_TRACER_VERSION" +mkdir .build +cd .build +cmake .. +make +make install + # Get Brotli source and deps cd "$BUILD_PATH" git clone --depth=1 https://github.com/google/ngx_brotli.git @@ -363,7 +410,7 @@ git submodule update cd "$BUILD_PATH" git clone -b v3/master --single-branch https://github.com/SpiderLabs/ModSecurity cd ModSecurity/ -git checkout 9ada0a28c8100f905014c128b0e6d11dd75ec7e5 +git checkout 145f2f35b751cc10ea6fe2b329f68eac20e2bb74 git submodule init git submodule update sh build.sh diff --git a/internal/ingress/annotations/annotations.go b/internal/ingress/annotations/annotations.go index 3c54e5a579..45d68c0eb7 100644 --- a/internal/ingress/annotations/annotations.go +++ b/internal/ingress/annotations/annotations.go @@ -49,6 +49,7 @@ import ( "k8s.io/ingress-nginx/internal/ingress/annotations/ratelimit" "k8s.io/ingress-nginx/internal/ingress/annotations/redirect" "k8s.io/ingress-nginx/internal/ingress/annotations/rewrite" + "k8s.io/ingress-nginx/internal/ingress/annotations/satisfy" "k8s.io/ingress-nginx/internal/ingress/annotations/secureupstream" "k8s.io/ingress-nginx/internal/ingress/annotations/serversnippet" "k8s.io/ingress-nginx/internal/ingress/annotations/serviceupstream" @@ -79,29 +80,31 @@ type Ingress struct { CorsConfig cors.Config CustomHTTPErrors []int 
DefaultBackend *apiv1.Service - Denied error - ExternalAuth authreq.Config - HTTP2PushPreload bool - Proxy proxy.Config - RateLimit ratelimit.Config - Redirect redirect.Config - Rewrite rewrite.Config - SecureUpstream secureupstream.Config - ServerSnippet string - ServiceUpstream bool - SessionAffinity sessionaffinity.Config - SSLPassthrough bool - UsePortInRedirects bool - UpstreamHashBy upstreamhashby.Config - LoadBalancing string - UpstreamVhost string - Whitelist ipwhitelist.SourceRange - XForwardedPrefix bool - SSLCiphers string - Logs log.Config - LuaRestyWAF luarestywaf.Config - InfluxDB influxdb.Config - ModSecurity modsecurity.Config + //TODO: Change this back into an error when https://github.com/imdario/mergo/issues/100 is resolved + Denied *string + ExternalAuth authreq.Config + HTTP2PushPreload bool + Proxy proxy.Config + RateLimit ratelimit.Config + Redirect redirect.Config + Rewrite rewrite.Config + Satisfy string + SecureUpstream secureupstream.Config + ServerSnippet string + ServiceUpstream bool + SessionAffinity sessionaffinity.Config + SSLPassthrough bool + UsePortInRedirects bool + UpstreamHashBy upstreamhashby.Config + LoadBalancing string + UpstreamVhost string + Whitelist ipwhitelist.SourceRange + XForwardedPrefix bool + SSLCiphers string + Logs log.Config + LuaRestyWAF luarestywaf.Config + InfluxDB influxdb.Config + ModSecurity modsecurity.Config } // Extractor defines the annotation parsers to be used in the extraction of annotations @@ -129,6 +132,7 @@ func NewAnnotationExtractor(cfg resolver.Resolver) Extractor { "RateLimit": ratelimit.NewParser(cfg), "Redirect": redirect.NewParser(cfg), "Rewrite": rewrite.NewParser(cfg), + "Satisfy": satisfy.NewParser(cfg), "SecureUpstream": secureupstream.NewParser(cfg), "ServerSnippet": serversnippet.NewParser(cfg), "ServiceUpstream": serviceupstream.NewParser(cfg), @@ -179,7 +183,8 @@ func (e Extractor) Extract(ing *extensions.Ingress) *Ingress { _, alreadyDenied := data[DeniedKeyName] if 
!alreadyDenied { - data[DeniedKeyName] = err + errString := err.Error() + data[DeniedKeyName] = &errString klog.Errorf("error reading %v annotation in Ingress %v/%v: %v", name, ing.GetNamespace(), ing.GetName(), err) continue } diff --git a/internal/ingress/annotations/auth/main_test.go b/internal/ingress/annotations/auth/main_test.go index 3546bd0252..146ba32c28 100644 --- a/internal/ingress/annotations/auth/main_test.go +++ b/internal/ingress/annotations/auth/main_test.go @@ -30,6 +30,7 @@ import ( meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + ing_errors "k8s.io/ingress-nginx/internal/ingress/errors" "k8s.io/ingress-nginx/internal/ingress/resolver" ) @@ -96,6 +97,42 @@ func TestIngressWithoutAuth(t *testing.T) { } } +func TestIngressAuthBadAuthType(t *testing.T) { + ing := buildIngress() + + data := map[string]string{} + data[parser.GetAnnotationWithPrefix("auth-type")] = "invalid" + ing.SetAnnotations(data) + + _, dir, _ := dummySecretContent(t) + defer os.RemoveAll(dir) + + expected := ing_errors.NewLocationDenied("invalid authentication type") + _, err := NewParser(dir, &mockSecret{}).Parse(ing) + if err.Error() != expected.Error() { + t.Errorf("expected '%v' but got '%v'", expected, err) + } +} + +func TestInvalidIngressAuthNoSecret(t *testing.T) { + ing := buildIngress() + + data := map[string]string{} + data[parser.GetAnnotationWithPrefix("auth-type")] = "basic" + ing.SetAnnotations(data) + + _, dir, _ := dummySecretContent(t) + defer os.RemoveAll(dir) + + expected := ing_errors.LocationDenied{ + Reason: errors.New("error reading secret name from annotation: ingress rule without annotations"), + } + _, err := NewParser(dir, &mockSecret{}).Parse(ing) + if err.Error() != expected.Reason.Error() { + t.Errorf("expected '%v' but got '%v'", expected, err) + } +} + func TestIngressAuth(t *testing.T) { ing := buildIngress() diff --git 
a/internal/ingress/annotations/authreq/main.go b/internal/ingress/annotations/authreq/main.go index c5cdbc334b..1cf2de2222 100644 --- a/internal/ingress/annotations/authreq/main.go +++ b/internal/ingress/annotations/authreq/main.go @@ -146,12 +146,12 @@ func (a authReq) Parse(ing *extensions.Ingress) (interface{}, error) { // Optional Parameters signIn, err := parser.GetStringAnnotation("auth-signin", ing) if err != nil { - klog.Warning("auth-signin annotation is undefined and will not be set") + klog.V(3).Infof("auth-signin annotation is undefined and will not be set") } authSnippet, err := parser.GetStringAnnotation("auth-snippet", ing) if err != nil { - klog.Warning("auth-snippet annotation is undefined and will not be set") + klog.V(3).Infof("auth-snippet annotation is undefined and will not be set") } responseHeaders := []string{} diff --git a/internal/ingress/annotations/authtls/main_test.go b/internal/ingress/annotations/authtls/main_test.go index a3613bb159..fbd0154bf6 100644 --- a/internal/ingress/annotations/authtls/main_test.go +++ b/internal/ingress/annotations/authtls/main_test.go @@ -23,6 +23,9 @@ import ( extensions "k8s.io/api/extensions/v1beta1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/errors" + "k8s.io/ingress-nginx/internal/ingress/resolver" ) func buildIngress() *extensions.Ingress { @@ -60,49 +63,66 @@ func buildIngress() *extensions.Ingress { } } +// mocks the resolver for authTLS +type mockSecret struct { + resolver.Mock +} + +// GetAuthCertificate from mockSecret mocks the GetAuthCertificate for authTLS +func (m mockSecret) GetAuthCertificate(name string) (*resolver.AuthSSLCert, error) { + if name != "default/demo-secret" { + return nil, errors.Errorf("there is no secret with name %v", name) + } + + return &resolver.AuthSSLCert{ + Secret: "default/demo-secret", + CAFileName: "/ssl/ca.crt", + 
PemSHA: "abc", + }, nil + +} + func TestAnnotations(t *testing.T) { ing := buildIngress() - data := map[string]string{} + + data[parser.GetAnnotationWithPrefix("auth-tls-secret")] = "default/demo-secret" + data[parser.GetAnnotationWithPrefix("auth-tls-verify-client")] = "off" + data[parser.GetAnnotationWithPrefix("auth-tls-verify-depth")] = "1" + data[parser.GetAnnotationWithPrefix("auth-tls-error-page")] = "ok.com/error" + data[parser.GetAnnotationWithPrefix("auth-tls-pass-certificate-to-upstream")] = "true" + ing.SetAnnotations(data) - /* - tests := []struct { - title string - url string - method string - sendBody bool - expErr bool - }{ - {"empty", "", "", false, true}, - {"no scheme", "bar", "", false, true}, - {"invalid host", "http://", "", false, true}, - {"invalid host (multiple dots)", "http://foo..bar.com", "", false, true}, - {"valid URL", "http://bar.foo.com/external-auth", "", false, false}, - {"valid URL - send body", "http://foo.com/external-auth", "POST", true, false}, - {"valid URL - send body", "http://foo.com/external-auth", "GET", true, false}, - } - - for _, test := range tests { - data[authTLSSecret] = "" - test.title - - u, err := ParseAnnotations(ing) - - if test.expErr { - if err == nil { - t.Errorf("%v: expected error but retuned nil", test.title) - } - continue - } - - if u.URL != test.url { - t.Errorf("%v: expected \"%v\" but \"%v\" was returned", test.title, test.url, u.URL) - } - if u.Method != test.method { - t.Errorf("%v: expected \"%v\" but \"%v\" was returned", test.title, test.method, u.Method) - } - if u.SendBody != test.sendBody { - t.Errorf("%v: expected \"%v\" but \"%v\" was returned", test.title, test.sendBody, u.SendBody) - } - }*/ + + fakeSecret := &mockSecret{} + i, err := NewParser(fakeSecret).Parse(ing) + if err != nil { + t.Errorf("Uxpected error with ingress: %v", err) + } + + u, ok := i.(*Config) + if !ok { + t.Errorf("expected *Config but got %v", u) + } + + secret, err := 
fakeSecret.GetAuthCertificate("default/demo-secret") + if err != nil { + t.Errorf("unexpected error getting secret %v", err) + } + + if u.AuthSSLCert.Secret != secret.Secret { + t.Errorf("expected %v but got %v", secret.Secret, u.AuthSSLCert.Secret) + } + if u.VerifyClient != "off" { + t.Errorf("expected %v but got %v", "off", u.VerifyClient) + } + if u.ValidationDepth != 1 { + t.Errorf("expected %v but got %v", 1, u.ValidationDepth) + } + if u.ErrorPage != "ok.com/error" { + t.Errorf("expected %v but got %v", "ok.com/error", u.ErrorPage) + } + if u.PassCertToUpstream != true { + t.Errorf("expected %v but got %v", true, u.PassCertToUpstream) + } } diff --git a/internal/ingress/annotations/backendprotocol/main_test.go b/internal/ingress/annotations/backendprotocol/main_test.go index 50a7fbcdd8..539d095627 100644 --- a/internal/ingress/annotations/backendprotocol/main_test.go +++ b/internal/ingress/annotations/backendprotocol/main_test.go @@ -42,18 +42,62 @@ func buildIngress() *extensions.Ingress { }, } } - -func TestParseAnnotations(t *testing.T) { +func TestParseInvalidAnnotations(t *testing.T) { ing := buildIngress() - _, err := NewParser(&resolver.Mock{}).Parse(ing) + // Test no annotations set + i, err := NewParser(&resolver.Mock{}).Parse(ing) + if err != nil { + t.Errorf("unexpected error parsing ingress with backend-protocol") + } + val, ok := i.(string) + if !ok { + t.Errorf("expected a string type") + } + if val != "HTTP" { + t.Errorf("expected HTTPS but %v returned", val) + } + + data := map[string]string{} + ing.SetAnnotations(data) + + // Test with empty annotations + i, err = NewParser(&resolver.Mock{}).Parse(ing) + if err != nil { + t.Errorf("unexpected error parsing ingress with backend-protocol") + } + val, ok = i.(string) + if !ok { + t.Errorf("expected a string type") + } + if val != "HTTP" { + t.Errorf("expected HTTPS but %v returned", val) + } + + // Test invalid annotation set + data[parser.GetAnnotationWithPrefix("backend-protocol")] = 
"INVALID" + ing.SetAnnotations(data) + + i, err = NewParser(&resolver.Mock{}).Parse(ing) if err != nil { - t.Errorf("unexpected error: %v", err) + t.Errorf("unexpected error parsing ingress with backend-protocol") } + val, ok = i.(string) + if !ok { + t.Errorf("expected a string type") + } + if val != "HTTP" { + t.Errorf("expected HTTPS but %v returned", val) + } +} + +func TestParseAnnotations(t *testing.T) { + ing := buildIngress() data := map[string]string{} data[parser.GetAnnotationWithPrefix("backend-protocol")] = "HTTPS" ing.SetAnnotations(data) + i, err := NewParser(&resolver.Mock{}).Parse(ing) if err != nil { t.Errorf("unexpected error parsing ingress with backend-protocol") diff --git a/internal/ingress/annotations/canary/main.go b/internal/ingress/annotations/canary/main.go index 2e7305934b..5645368189 100644 --- a/internal/ingress/annotations/canary/main.go +++ b/internal/ingress/annotations/canary/main.go @@ -30,10 +30,11 @@ type canary struct { // Config returns the configuration rules for setting up the Canary type Config struct { - Enabled bool - Weight int - Header string - Cookie string + Enabled bool + Weight int + Header string + HeaderValue string + Cookie string } // NewParser parses the ingress for canary related annotations @@ -62,12 +63,17 @@ func (c canary) Parse(ing *extensions.Ingress) (interface{}, error) { config.Header = "" } + config.HeaderValue, err = parser.GetStringAnnotation("canary-by-header-value", ing) + if err != nil { + config.HeaderValue = "" + } + config.Cookie, err = parser.GetStringAnnotation("canary-by-cookie", ing) if err != nil { config.Cookie = "" } - if !config.Enabled && (config.Weight > 0 || len(config.Header) > 0 || len(config.Cookie) > 0) { + if !config.Enabled && (config.Weight > 0 || len(config.Header) > 0 || len(config.HeaderValue) > 0 || len(config.Cookie) > 0) { return nil, errors.NewInvalidAnnotationConfiguration("canary", "configured but not enabled") } diff --git 
a/internal/ingress/annotations/canary/main_test.go b/internal/ingress/annotations/canary/main_test.go index ddac491f2d..3ad8b3d7fd 100644 --- a/internal/ingress/annotations/canary/main_test.go +++ b/internal/ingress/annotations/canary/main_test.go @@ -17,15 +17,17 @@ limitations under the License. package canary import ( + "testing" + api "k8s.io/api/core/v1" extensions "k8s.io/api/extensions/v1beta1" metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" - "testing" - "k8s.io/ingress-nginx/internal/ingress/resolver" "strconv" + + "k8s.io/ingress-nginx/internal/ingress/resolver" ) func buildIngress() *extensions.Ingress { @@ -63,6 +65,30 @@ func buildIngress() *extensions.Ingress { } } +func TestCanaryInvalid(t *testing.T) { + ing := buildIngress() + + data := map[string]string{} + ing.SetAnnotations(data) + + i, err := NewParser(&resolver.Mock{}).Parse(ing) + if err != nil { + t.Errorf("Error Parsing Canary Annotations") + } + + val, ok := i.(*Config) + if !ok { + t.Errorf("Expected %v and got %v", "*Config", val) + } + if val.Enabled != false { + t.Errorf("Expected %v but got %v", false, val.Enabled) + } + if val.Weight != 0 { + t.Errorf("Expected %v but got %v", 0, val.Weight) + } + +} + func TestAnnotations(t *testing.T) { ing := buildIngress() diff --git a/internal/ingress/annotations/customhttperrors/main_test.go b/internal/ingress/annotations/customhttperrors/main_test.go new file mode 100644 index 0000000000..610165b5dc --- /dev/null +++ b/internal/ingress/annotations/customhttperrors/main_test.go @@ -0,0 +1,91 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package customhttperrors + +import ( + "reflect" + "sort" + "testing" + + api "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/resolver" + + "k8s.io/apimachinery/pkg/util/intstr" +) + +func buildIngress() *extensions.Ingress { + return &extensions.Ingress{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "foo", + Namespace: api.NamespaceDefault, + }, + Spec: extensions.IngressSpec{ + Backend: &extensions.IngressBackend{ + ServiceName: "default-backend", + ServicePort: intstr.FromInt(80), + }, + }, + } +} + +func TestParseInvalidAnnotations(t *testing.T) { + ing := buildIngress() + + _, err := NewParser(&resolver.Mock{}).Parse(ing) + if err == nil { + t.Errorf("expected error parsing ingress with custom-http-errors") + } + + data := map[string]string{} + data[parser.GetAnnotationWithPrefix("custom-http-errors")] = "400,404,abc,502" + ing.SetAnnotations(data) + + i, err := NewParser(&resolver.Mock{}).Parse(ing) + if err == nil { + t.Errorf("expected error parsing ingress with custom-http-errors") + } + if i != nil { + t.Errorf("expected %v but got %v", nil, i) + } +} + +func TestParseAnnotations(t *testing.T) { + ing := buildIngress() + + data := map[string]string{} + data[parser.GetAnnotationWithPrefix("custom-http-errors")] = "400,404,500,502" + ing.SetAnnotations(data) + + i, err := NewParser(&resolver.Mock{}).Parse(ing) + if err != nil { + t.Errorf("unexpected error parsing ingress with 
custom-http-errors") + } + val, ok := i.([]int) + if !ok { + t.Errorf("expected a []int type") + } + + expected := []int{400, 404, 500, 502} + sort.Ints(val) + + if !reflect.DeepEqual(expected, val) { + t.Errorf("expected %v but got %v", expected, val) + } +} diff --git a/internal/ingress/annotations/defaultbackend/main_test.go b/internal/ingress/annotations/defaultbackend/main_test.go new file mode 100644 index 0000000000..c344a8e035 --- /dev/null +++ b/internal/ingress/annotations/defaultbackend/main_test.go @@ -0,0 +1,105 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package defaultbackend + +import ( + "testing" + + api "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/errors" + "k8s.io/ingress-nginx/internal/ingress/resolver" + + "k8s.io/apimachinery/pkg/util/intstr" +) + +func buildIngress() *extensions.Ingress { + defaultBackend := extensions.IngressBackend{ + ServiceName: "default-backend", + ServicePort: intstr.FromInt(80), + } + + return &extensions.Ingress{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "foo", + Namespace: api.NamespaceDefault, + }, + Spec: extensions.IngressSpec{ + Backend: &extensions.IngressBackend{ + ServiceName: "default-backend", + ServicePort: intstr.FromInt(80), + }, + Rules: []extensions.IngressRule{ + { + Host: "foo.bar.com", + IngressRuleValue: extensions.IngressRuleValue{ + HTTP: &extensions.HTTPIngressRuleValue{ + Paths: []extensions.HTTPIngressPath{ + { + Path: "/foo", + Backend: defaultBackend, + }, + }, + }, + }, + }, + }, + }, + } +} + +type mockService struct { + resolver.Mock +} + +// GetService mocks the GetService call from the defaultbackend package +func (m mockService) GetService(name string) (*api.Service, error) { + if name != "default/demo-service" { + return nil, errors.Errorf("there is no service with name %v", name) + } + + return &api.Service{ + ObjectMeta: meta_v1.ObjectMeta{ + Namespace: api.NamespaceDefault, + Name: "demo-service", + }, + }, nil +} + +func TestAnnotations(t *testing.T) { + ing := buildIngress() + + data := map[string]string{} + data[parser.GetAnnotationWithPrefix("default-backend")] = "demo-service" + ing.SetAnnotations(data) + + fakeService := &mockService{} + i, err := NewParser(fakeService).Parse(ing) + if err != nil { + t.Errorf("unexpected error %v", err) + } + + svc, ok := i.(*api.Service) + if !ok { + t.Errorf("expected *api.Service but got %v", svc) + } + if svc.Name != 
"demo-service" { + t.Errorf("expected %v but got %v", "demo-service", svc.Name) + } +} diff --git a/internal/ingress/annotations/influxdb/main_test.go b/internal/ingress/annotations/influxdb/main_test.go index b8c67ba590..a022ab66d7 100644 --- a/internal/ingress/annotations/influxdb/main_test.go +++ b/internal/ingress/annotations/influxdb/main_test.go @@ -62,6 +62,36 @@ func buildIngress() *extensions.Ingress { } } +func TestIngressInvalidInfluxDB(t *testing.T) { + ing := buildIngress() + + influx, _ := NewParser(&resolver.Mock{}).Parse(ing) + nginxInflux, ok := influx.(*Config) + if !ok { + t.Errorf("expected a Config type") + } + + if nginxInflux.InfluxDBEnabled == true { + t.Errorf("expected influxdb enabled but returned %v", nginxInflux.InfluxDBEnabled) + } + + if nginxInflux.InfluxDBMeasurement != "default" { + t.Errorf("expected measurement name not found. Found %v", nginxInflux.InfluxDBMeasurement) + } + + if nginxInflux.InfluxDBPort != "8089" { + t.Errorf("expected port not found. Found %v", nginxInflux.InfluxDBPort) + } + + if nginxInflux.InfluxDBHost != "127.0.0.1" { + t.Errorf("expected host not found. Found %v", nginxInflux.InfluxDBHost) + } + + if nginxInflux.InfluxDBServerName != "nginx-ingress" { + t.Errorf("expected server name not found. 
Found %v", nginxInflux.InfluxDBServerName) + } +} + func TestIngressInfluxDB(t *testing.T) { ing := buildIngress() diff --git a/internal/ingress/annotations/proxy/main.go b/internal/ingress/annotations/proxy/main.go index b8844ed606..1e3fcd49ee 100644 --- a/internal/ingress/annotations/proxy/main.go +++ b/internal/ingress/annotations/proxy/main.go @@ -29,6 +29,7 @@ type Config struct { ConnectTimeout int `json:"connectTimeout"` SendTimeout int `json:"sendTimeout"` ReadTimeout int `json:"readTimeout"` + BuffersNumber int `json:"buffersNumber"` BufferSize string `json:"bufferSize"` CookieDomain string `json:"cookieDomain"` CookiePath string `json:"cookiePath"` @@ -60,6 +61,9 @@ func (l1 *Config) Equal(l2 *Config) bool { if l1.ReadTimeout != l2.ReadTimeout { return false } + if l1.BuffersNumber != l2.BuffersNumber { + return false + } if l1.BufferSize != l2.BufferSize { return false } @@ -123,6 +127,11 @@ func (a proxy) Parse(ing *extensions.Ingress) (interface{}, error) { config.ReadTimeout = defBackend.ProxyReadTimeout } + config.BuffersNumber, err = parser.GetIntAnnotation("proxy-buffers-number", ing) + if err != nil { + config.BuffersNumber = defBackend.ProxyBuffersNumber + } + config.BufferSize, err = parser.GetStringAnnotation("proxy-buffer-size", ing) if err != nil { config.BufferSize = defBackend.ProxyBufferSize diff --git a/internal/ingress/annotations/proxy/main_test.go b/internal/ingress/annotations/proxy/main_test.go index 075047ecec..33bb6b6143 100644 --- a/internal/ingress/annotations/proxy/main_test.go +++ b/internal/ingress/annotations/proxy/main_test.go @@ -73,6 +73,7 @@ func (m mockBackend) GetDefaultBackend() defaults.Backend { ProxyConnectTimeout: 10, ProxySendTimeout: 15, ProxyReadTimeout: 20, + ProxyBuffersNumber: 4, ProxyBufferSize: "10k", ProxyBodySize: "3k", ProxyNextUpstream: "error", @@ -89,6 +90,7 @@ func TestProxy(t *testing.T) { data[parser.GetAnnotationWithPrefix("proxy-connect-timeout")] = "1" 
data[parser.GetAnnotationWithPrefix("proxy-send-timeout")] = "2" data[parser.GetAnnotationWithPrefix("proxy-read-timeout")] = "3" + data[parser.GetAnnotationWithPrefix("proxy-buffers-number")] = "8" data[parser.GetAnnotationWithPrefix("proxy-buffer-size")] = "1k" data[parser.GetAnnotationWithPrefix("proxy-body-size")] = "2k" data[parser.GetAnnotationWithPrefix("proxy-next-upstream")] = "off" @@ -114,6 +116,9 @@ func TestProxy(t *testing.T) { if p.ReadTimeout != 3 { t.Errorf("expected 3 as read-timeout but returned %v", p.ReadTimeout) } + if p.BuffersNumber != 8 { + t.Errorf("expected 8 as proxy-buffers-number but returned %v", p.BuffersNumber) + } if p.BufferSize != "1k" { t.Errorf("expected 1k as buffer-size but returned %v", p.BufferSize) } @@ -157,6 +162,9 @@ func TestProxyWithNoAnnotation(t *testing.T) { if p.ReadTimeout != 20 { t.Errorf("expected 20 as read-timeout but returned %v", p.ReadTimeout) } + if p.BuffersNumber != 4 { + t.Errorf("expected 4 as buffer-number but returned %v", p.BuffersNumber) + } if p.BufferSize != "10k" { t.Errorf("expected 10k as buffer-size but returned %v", p.BufferSize) } diff --git a/internal/ingress/annotations/ratelimit/main_test.go b/internal/ingress/annotations/ratelimit/main_test.go index 06ced468b8..056f09a1d0 100644 --- a/internal/ingress/annotations/ratelimit/main_test.go +++ b/internal/ingress/annotations/ratelimit/main_test.go @@ -17,6 +17,8 @@ limitations under the License. 
package ratelimit import ( + "reflect" + "sort" "testing" api "k8s.io/api/core/v1" @@ -83,7 +85,24 @@ func TestWithoutAnnotations(t *testing.T) { } } -func TestBadRateLimiting(t *testing.T) { +func TestParseCIDRs(t *testing.T) { + cidr, _ := parseCIDRs("invalid.com") + if cidr != nil { + t.Errorf("expected %v but got %v", nil, cidr) + } + + expected := []string{"192.0.0.1", "192.0.1.0/24"} + cidr, err := parseCIDRs("192.0.0.1, 192.0.1.0/24") + if err != nil { + t.Errorf("unexpected error %v", err) + } + sort.Strings(cidr) + if !reflect.DeepEqual(expected, cidr) { + t.Errorf("expected %v but got %v", expected, cidr) + } +} + +func TestRateLimiting(t *testing.T) { ing := buildIngress() data := map[string]string{} diff --git a/internal/ingress/annotations/redirect/redirect_test.go b/internal/ingress/annotations/redirect/redirect_test.go index 6c60341aa0..95e80b95a9 100644 --- a/internal/ingress/annotations/redirect/redirect_test.go +++ b/internal/ingress/annotations/redirect/redirect_test.go @@ -18,12 +18,15 @@ package redirect import ( "net/http" + "net/url" + "reflect" "strconv" "testing" extensions "k8s.io/api/extensions/v1beta1" "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/errors" "k8s.io/ingress-nginx/internal/ingress/resolver" ) @@ -99,3 +102,56 @@ func TestPermanentRedirectWithCustomCode(t *testing.T) { }) } } + +func TestTemporalRedirect(t *testing.T) { + rp := NewParser(resolver.Mock{}) + if rp == nil { + t.Fatalf("Expected a parser.IngressAnnotation but returned nil") + } + + ing := new(extensions.Ingress) + + data := make(map[string]string, 1) + data[parser.GetAnnotationWithPrefix("from-to-www-redirect")] = "true" + data[parser.GetAnnotationWithPrefix("temporal-redirect")] = defRedirectURL + ing.SetAnnotations(data) + + i, err := rp.Parse(ing) + if err != nil { + t.Errorf("Unexpected error with ingress: %v", err) + } + redirect, ok := i.(*Config) + if !ok { + t.Errorf("Expected a Redirect type") + } + 
if redirect.URL != defRedirectURL { + t.Errorf("Expected %v as redirect but returned %s", defRedirectURL, redirect.URL) + } + if redirect.Code != http.StatusFound { + t.Errorf("Expected %v as redirect to have a code %d but had %d", defRedirectURL, defaultPermanentRedirectCode, redirect.Code) + } + if redirect.FromToWWW != true { + t.Errorf("Expected %v as redirect to have from-to-www as %v but got %v", defRedirectURL, true, redirect.FromToWWW) + } +} + +func TestIsValidURL(t *testing.T) { + + invalid := "ok.com" + urlParse, err := url.Parse(invalid) + if err != nil { + t.Errorf("unexpected error %v", err) + } + + expected := errors.Errorf("only http and https are valid protocols (%v)", urlParse.Scheme) + err = isValidURL(invalid) + if reflect.DeepEqual(expected.Error, err.Error) { + t.Errorf("expected '%v' but got '%v'", expected, err) + } + + valid := "http://ok.com" + err = isValidURL(valid) + if err != nil { + t.Errorf("expected nil but got %v", err) + } +} diff --git a/internal/ingress/annotations/satisfy/main.go b/internal/ingress/annotations/satisfy/main.go new file mode 100644 index 0000000000..531335636f --- /dev/null +++ b/internal/ingress/annotations/satisfy/main.go @@ -0,0 +1,43 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package satisfy + +import ( + extensions "k8s.io/api/extensions/v1beta1" + + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/resolver" +) + +type satisfy struct { + r resolver.Resolver +} + +// NewParser creates a new SATISFY annotation parser +func NewParser(r resolver.Resolver) parser.IngressAnnotation { + return satisfy{r} +} + +// Parse parses annotation contained in the ingress +func (s satisfy) Parse(ing *extensions.Ingress) (interface{}, error) { + satisfy, err := parser.GetStringAnnotation("satisfy", ing) + if err != nil || satisfy != "any" { + satisfy = "all" + } + + return satisfy, nil +} diff --git a/internal/ingress/annotations/satisfy/main_test.go b/internal/ingress/annotations/satisfy/main_test.go new file mode 100644 index 0000000000..52389825a6 --- /dev/null +++ b/internal/ingress/annotations/satisfy/main_test.go @@ -0,0 +1,95 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package satisfy + +import ( + "testing" + + api "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/resolver" +) + +func buildIngress() *extensions.Ingress { + defaultBackend := extensions.IngressBackend{ + ServiceName: "default-backend", + ServicePort: intstr.FromInt(80), + } + + return &extensions.Ingress{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "fake", + Namespace: api.NamespaceDefault, + }, + Spec: extensions.IngressSpec{ + Backend: &extensions.IngressBackend{ + ServiceName: "default-backend", + ServicePort: intstr.FromInt(80), + }, + Rules: []extensions.IngressRule{ + { + Host: "fake.host.com", + IngressRuleValue: extensions.IngressRuleValue{ + HTTP: &extensions.HTTPIngressRuleValue{ + Paths: []extensions.HTTPIngressPath{ + { + Path: "/fake", + Backend: defaultBackend, + }, + }, + }, + }, + }, + }, + }, + } +} + +func TestSatisfyParser(t *testing.T) { + ing := buildIngress() + + data := map[string]string{ + "any": "any", + "all": "all", + "invalid": "all", + } + + annotations := map[string]string{} + + for input, expected := range data { + annotations[parser.GetAnnotationWithPrefix("satisfy")] = input + ing.SetAnnotations(annotations) + + satisfyt, err := NewParser(&resolver.Mock{}).Parse(ing) + if err != nil { + t.Errorf("error parsing annotations: %v", err) + } + + val, ok := satisfyt.(string) + if !ok { + t.Errorf("expected a string type but return %t", satisfyt) + } + + if val != expected { + t.Errorf("expected %v but returned %v", expected, val) + } + } +} diff --git a/internal/ingress/annotations/secureupstream/main_test.go b/internal/ingress/annotations/secureupstream/main_test.go index adee3c63ae..8acea321d2 100644 --- a/internal/ingress/annotations/secureupstream/main_test.go +++ b/internal/ingress/annotations/secureupstream/main_test.go 
@@ -76,6 +76,22 @@ func (cfg mockCfg) GetAuthCertificate(secret string) (*resolver.AuthSSLCert, err return nil, fmt.Errorf("secret not found: %v", secret) } +func TestNoCA(t *testing.T) { + ing := buildIngress() + data := map[string]string{} + data[parser.GetAnnotationWithPrefix("backend-protocol")] = "HTTPS" + ing.SetAnnotations(data) + + _, err := NewParser(mockCfg{ + certs: map[string]resolver.AuthSSLCert{ + "default/secure-verify-ca": {}, + }, + }).Parse(ing) + if err != nil { + t.Errorf("Unexpected error on ingress: %v", err) + } +} + func TestAnnotations(t *testing.T) { ing := buildIngress() data := map[string]string{} diff --git a/internal/ingress/annotations/upstreamvhost/main_test.go b/internal/ingress/annotations/upstreamvhost/main_test.go new file mode 100644 index 0000000000..737d0d11d7 --- /dev/null +++ b/internal/ingress/annotations/upstreamvhost/main_test.go @@ -0,0 +1,55 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package upstreamvhost + +import ( + "testing" + + api "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/ingress-nginx/internal/ingress/annotations/parser" + "k8s.io/ingress-nginx/internal/ingress/resolver" +) + +func TestParse(t *testing.T) { + ing := &extensions.Ingress{ + ObjectMeta: meta_v1.ObjectMeta{ + Name: "foo", + Namespace: api.NamespaceDefault, + }, + Spec: extensions.IngressSpec{}, + } + + data := map[string]string{} + data[parser.GetAnnotationWithPrefix("upstream-vhost")] = "ok.com" + + ing.SetAnnotations(data) + + i, err := NewParser(&resolver.Mock{}).Parse(ing) + if err != nil { + t.Errorf("unexpected error %v", err) + } + + vhost, ok := i.(string) + if !ok { + t.Errorf("expected string but got %v", vhost) + } + if vhost != "ok.com" { + t.Errorf("expected %v but got %v", "ok.com", vhost) + } +} diff --git a/internal/ingress/controller/checker.go b/internal/ingress/controller/checker.go index 9ab499888f..b2beaa79d0 100644 --- a/internal/ingress/controller/checker.go +++ b/internal/ingress/controller/checker.go @@ -21,13 +21,13 @@ import ( "net/http" "strconv" "strings" - "time" "github.com/ncabatoff/process-exporter/proc" "github.com/pkg/errors" -) + "k8s.io/klog" -const nginxPID = "/tmp/nginx.pid" + "k8s.io/ingress-nginx/internal/nginx" +) // Name returns the healthcheck name func (n NGINXController) Name() string { @@ -36,25 +36,25 @@ func (n NGINXController) Name() string { // Check returns if the nginx healthz endpoint is returning ok (status code 200) func (n *NGINXController) Check(_ *http.Request) error { - - url := fmt.Sprintf("http://127.0.0.1:%v%v", n.cfg.ListenPorts.Status, ngxHealthPath) - timeout := n.cfg.HealthCheckTimeout - statusCode, err := simpleGet(url, timeout) + statusCode, _, err := nginx.NewGetStatusRequest(nginx.HealthPath) if err != nil { + klog.Errorf("healthcheck error: %v", err) return err } if statusCode != 200 { + 
klog.Errorf("healthcheck error: %v", statusCode) return fmt.Errorf("ingress controller is not healthy") } - url = fmt.Sprintf("http://127.0.0.1:%v/is-dynamic-lb-initialized", n.cfg.ListenPorts.Status) - statusCode, err = simpleGet(url, timeout) + statusCode, _, err = nginx.NewGetStatusRequest("/is-dynamic-lb-initialized") if err != nil { + klog.Errorf("healthcheck error: %v", err) return err } if statusCode != 200 { + klog.Errorf("healthcheck error: %v", statusCode) return fmt.Errorf("dynamic load balancer not started") } @@ -63,35 +63,14 @@ func (n *NGINXController) Check(_ *http.Request) error { if err != nil { return errors.Wrap(err, "unexpected error reading /proc directory") } - f, err := n.fileSystem.ReadFile(nginxPID) + f, err := n.fileSystem.ReadFile(nginx.PID) if err != nil { - return errors.Wrapf(err, "unexpected error reading %v", nginxPID) + return errors.Wrapf(err, "unexpected error reading %v", nginx.PID) } pid, err := strconv.Atoi(strings.TrimRight(string(f), "\r\n")) if err != nil { - return errors.Wrapf(err, "unexpected error reading the nginx PID from %v", nginxPID) + return errors.Wrapf(err, "unexpected error reading the nginx PID from %v", nginx.PID) } _, err = fs.NewProc(pid) - return err } - -func simpleGet(url string, timeout time.Duration) (int, error) { - client := &http.Client{ - Timeout: timeout * time.Second, - Transport: &http.Transport{DisableKeepAlives: true}, - } - - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return -1, err - } - - res, err := client.Do(req) - if err != nil { - return -1, err - } - defer res.Body.Close() - - return res.StatusCode, nil -} diff --git a/internal/ingress/controller/checker_test.go b/internal/ingress/controller/checker_test.go index 4e2385cf4f..290c12ea15 100644 --- a/internal/ingress/controller/checker_test.go +++ b/internal/ingress/controller/checker_test.go @@ -21,6 +21,7 @@ import ( "net" "net/http" "net/http/httptest" + "os" "os/exec" "testing" @@ -29,27 +30,37 @@ import ( 
"k8s.io/ingress-nginx/internal/file" ngx_config "k8s.io/ingress-nginx/internal/ingress/controller/config" + "k8s.io/ingress-nginx/internal/nginx" ) func TestNginxCheck(t *testing.T) { mux := http.NewServeMux() - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - fmt.Fprintf(w, "ok") - })) + listener, err := net.Listen("unix", nginx.StatusSocket) + if err != nil { + t.Errorf("crating unix listener: %s", err) + } + defer listener.Close() + defer os.Remove(nginx.StatusSocket) + + server := &httptest.Server{ + Listener: listener, + Config: &http.Server{ + Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, "ok") + }), + }, + } defer server.Close() - // port to be used in the check - p := server.Listener.Addr().(*net.TCPAddr).Port + server.Start() // mock filesystem - fs := filesystem.NewFakeFs() + fs := filesystem.DefaultFs{} n := &NGINXController{ cfg: &Configuration{ - ListenPorts: &ngx_config.ListenPorts{ - Status: p, - }, + ListenPorts: &ngx_config.ListenPorts{}, }, fileSystem: fs, } @@ -62,7 +73,7 @@ func TestNginxCheck(t *testing.T) { // create pid file fs.MkdirAll("/tmp", file.ReadWriteByUser) - pidFile, err := fs.Create(nginxPID) + pidFile, err := fs.Create(nginx.PID) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -102,20 +113,14 @@ func TestNginxCheck(t *testing.T) { t.Error("expected an error but none returned") } }) - - t.Run("invalid port", func(t *testing.T) { - n.cfg.ListenPorts.Status = 9000 - if err := callHealthz(true, mux); err == nil { - t.Error("expected an error but none returned") - } - }) } func callHealthz(expErr bool, mux *http.ServeMux) error { - req, err := http.NewRequest("GET", "http://localhost:8080/healthz", nil) + req, err := http.NewRequest("GET", "/healthz", nil) if err != nil { - return err + return fmt.Errorf("healthz error: %v", err) } + w := httptest.NewRecorder() 
mux.ServeHTTP(w, req) diff --git a/internal/ingress/controller/config/config.go b/internal/ingress/controller/config/config.go index ac1551f4bb..4378819fd8 100644 --- a/internal/ingress/controller/config/config.go +++ b/internal/ingress/controller/config/config.go @@ -94,6 +94,15 @@ type Configuration struct { // By default this is disabled AllowBackendServerHeader bool `json:"allow-backend-server-header"` + // AccessLogParams sets additionals params for access_log + // http://nginx.org/en/docs/http/ngx_http_log_module.html#access_log + // By default it's empty + AccessLogParams string `json:"access-log-params,omitempty"` + + // EnableAccessLogForDefaultBackend enable access_log for default backend + // By default this is disabled + EnableAccessLogForDefaultBackend bool `json:"enable-access-log-for-default-backend"` + // AccessLogPath sets the path of the access logs if enabled // http://nginx.org/en/docs/http/ngx_http_log_module.html#access_log // By default access logs go to /var/log/nginx/access.log @@ -486,6 +495,21 @@ type Configuration struct { // Default: 1 JaegerSamplerParam string `json:"jaeger-sampler-param"` + // DatadogCollectorHost specifies the datadog agent host to use when uploading traces + DatadogCollectorHost string `json:"datadog-collector-host"` + + // DatadogCollectorPort specifies the port to use when uploading traces + // Default: 8126 + DatadogCollectorPort int `json:"datadog-collector-port"` + + // DatadogServiceName specifies the service name to use for any traces created + // Default: nginx + DatadogServiceName string `json:"datadog-service-name"` + + // DatadogOperationNameOverride overrides the operation naem to use for any traces crated + // Default: nginx.handle + DatadogOperationNameOverride string `json:"datadog-operation-name-override"` + // MainSnippet adds custom configuration to the main section of the nginx configuration MainSnippet string `json:"main-snippet"` @@ -575,81 +599,84 @@ func NewDefault() Configuration { 
defProxyDeadlineDuration := time.Duration(5) * time.Second cfg := Configuration{ - AllowBackendServerHeader: false, - AccessLogPath: "/var/log/nginx/access.log", - WorkerCPUAffinity: "", - ErrorLogPath: "/var/log/nginx/error.log", - BlockCIDRs: defBlockEntity, - BlockUserAgents: defBlockEntity, - BlockReferers: defBlockEntity, - BrotliLevel: 4, - BrotliTypes: brotliTypes, - ClientHeaderBufferSize: "1k", - ClientHeaderTimeout: 60, - ClientBodyBufferSize: "8k", - ClientBodyTimeout: 60, - EnableDynamicTLSRecords: true, - EnableUnderscoresInHeaders: false, - ErrorLogLevel: errorLevel, - UseForwardedHeaders: false, - ForwardedForHeader: "X-Forwarded-For", - ComputeFullForwardedFor: false, - ProxyAddOriginalURIHeader: true, - GenerateRequestID: true, - HTTP2MaxFieldSize: "4k", - HTTP2MaxHeaderSize: "16k", - HTTP2MaxRequests: 1000, - HTTPRedirectCode: 308, - HSTS: true, - HSTSIncludeSubdomains: true, - HSTSMaxAge: hstsMaxAge, - HSTSPreload: false, - IgnoreInvalidHeaders: true, - GzipLevel: 5, - GzipTypes: gzipTypes, - KeepAlive: 75, - KeepAliveRequests: 100, - LargeClientHeaderBuffers: "4 8k", - LogFormatEscapeJSON: false, - LogFormatStream: logFormatStream, - LogFormatUpstream: logFormatUpstream, - EnableMultiAccept: true, - MaxWorkerConnections: 16384, - MaxWorkerOpenFiles: 0, - MapHashBucketSize: 64, - NginxStatusIpv4Whitelist: defNginxStatusIpv4Whitelist, - NginxStatusIpv6Whitelist: defNginxStatusIpv6Whitelist, - ProxyRealIPCIDR: defIPCIDR, - ProxyProtocolHeaderTimeout: defProxyDeadlineDuration, - ServerNameHashMaxSize: 1024, - ProxyHeadersHashMaxSize: 512, - ProxyHeadersHashBucketSize: 64, - ProxyStreamResponses: 1, - ReusePort: true, - ShowServerTokens: true, - SSLBufferSize: sslBufferSize, - SSLCiphers: sslCiphers, - SSLECDHCurve: "auto", - SSLProtocols: sslProtocols, - SSLSessionCache: true, - SSLSessionCacheSize: sslSessionCacheSize, - SSLSessionTickets: true, - SSLSessionTimeout: sslSessionTimeout, - EnableBrotli: false, - UseGzip: true, - UseGeoIP: true, - 
UseGeoIP2: false, - WorkerProcesses: strconv.Itoa(runtime.NumCPU()), - WorkerShutdownTimeout: "10s", - VariablesHashBucketSize: 128, - VariablesHashMaxSize: 2048, - UseHTTP2: true, - ProxyStreamTimeout: "600s", + AllowBackendServerHeader: false, + AccessLogPath: "/var/log/nginx/access.log", + AccessLogParams: "", + EnableAccessLogForDefaultBackend: false, + WorkerCPUAffinity: "", + ErrorLogPath: "/var/log/nginx/error.log", + BlockCIDRs: defBlockEntity, + BlockUserAgents: defBlockEntity, + BlockReferers: defBlockEntity, + BrotliLevel: 4, + BrotliTypes: brotliTypes, + ClientHeaderBufferSize: "1k", + ClientHeaderTimeout: 60, + ClientBodyBufferSize: "8k", + ClientBodyTimeout: 60, + EnableDynamicTLSRecords: true, + EnableUnderscoresInHeaders: false, + ErrorLogLevel: errorLevel, + UseForwardedHeaders: false, + ForwardedForHeader: "X-Forwarded-For", + ComputeFullForwardedFor: false, + ProxyAddOriginalURIHeader: true, + GenerateRequestID: true, + HTTP2MaxFieldSize: "4k", + HTTP2MaxHeaderSize: "16k", + HTTP2MaxRequests: 1000, + HTTPRedirectCode: 308, + HSTS: true, + HSTSIncludeSubdomains: true, + HSTSMaxAge: hstsMaxAge, + HSTSPreload: false, + IgnoreInvalidHeaders: true, + GzipLevel: 5, + GzipTypes: gzipTypes, + KeepAlive: 75, + KeepAliveRequests: 100, + LargeClientHeaderBuffers: "4 8k", + LogFormatEscapeJSON: false, + LogFormatStream: logFormatStream, + LogFormatUpstream: logFormatUpstream, + EnableMultiAccept: true, + MaxWorkerConnections: 16384, + MaxWorkerOpenFiles: 0, + MapHashBucketSize: 64, + NginxStatusIpv4Whitelist: defNginxStatusIpv4Whitelist, + NginxStatusIpv6Whitelist: defNginxStatusIpv6Whitelist, + ProxyRealIPCIDR: defIPCIDR, + ProxyProtocolHeaderTimeout: defProxyDeadlineDuration, + ServerNameHashMaxSize: 1024, + ProxyHeadersHashMaxSize: 512, + ProxyHeadersHashBucketSize: 64, + ProxyStreamResponses: 1, + ReusePort: true, + ShowServerTokens: true, + SSLBufferSize: sslBufferSize, + SSLCiphers: sslCiphers, + SSLECDHCurve: "auto", + SSLProtocols: sslProtocols, + 
SSLSessionCache: true, + SSLSessionCacheSize: sslSessionCacheSize, + SSLSessionTickets: true, + SSLSessionTimeout: sslSessionTimeout, + EnableBrotli: false, + UseGzip: true, + UseGeoIP: true, + UseGeoIP2: false, + WorkerProcesses: strconv.Itoa(runtime.NumCPU()), + WorkerShutdownTimeout: "10s", + VariablesHashBucketSize: 128, + VariablesHashMaxSize: 2048, + UseHTTP2: true, + ProxyStreamTimeout: "600s", Backend: defaults.Backend{ ProxyBodySize: bodySize, ProxyConnectTimeout: 5, ProxyReadTimeout: 60, ProxySendTimeout: 60, + ProxyBuffersNumber: 4, ProxyBufferSize: "4k", ProxyCookieDomain: "off", ProxyCookiePath: "off", @@ -679,6 +706,9 @@ func NewDefault() Configuration { JaegerServiceName: "nginx", JaegerSamplerType: "const", JaegerSamplerParam: "1", + DatadogServiceName: "nginx", + DatadogCollectorPort: 8126, + DatadogOperationNameOverride: "nginx.handle", LimitReqStatusCode: 503, LimitConnStatusCode: 503, SyslogPort: 514, @@ -715,7 +745,6 @@ type TemplateConfig struct { TCPBackends []ingress.L4Service UDPBackends []ingress.L4Service HealthzURI string - CustomErrors bool Cfg Configuration IsIPV6Enabled bool IsSSLPassthroughEnabled bool @@ -726,6 +755,11 @@ type TemplateConfig struct { PublishService *apiv1.Service DynamicCertificatesEnabled bool EnableMetrics bool + + PID string + StatusSocket string + StatusPath string + StreamSocket string } // ListenPorts describe the ports required to run the @@ -733,7 +767,6 @@ type TemplateConfig struct { type ListenPorts struct { HTTP int HTTPS int - Status int Health int Default int SSLProxy int diff --git a/internal/ingress/controller/controller.go b/internal/ingress/controller/controller.go index f180e905e9..728e8b3a6e 100644 --- a/internal/ingress/controller/controller.go +++ b/internal/ingress/controller/controller.go @@ -18,6 +18,7 @@ package controller import ( "fmt" + "k8s.io/ingress-nginx/internal/ingress/annotations/log" "sort" "strconv" "strings" @@ -65,7 +66,6 @@ type Configuration struct { // +optional 
UDPConfigMapName string - DefaultHealthzURL string HealthCheckTimeout time.Duration DefaultSSLCertificate string @@ -195,10 +195,8 @@ func (n *NGINXController) syncIngress(interface{}) error { isFirstSync := n.runningConfig.Equal(&ingress.Configuration{}) if isFirstSync { - // For the initial sync it always takes some time for NGINX to - // start listening on the configured port (default 18080) - // For large configurations it might take a while so we loop - // and back off + // For the initial sync it always takes some time for NGINX to start listening + // For large configurations it might take a while so we loop and back off klog.Info("Initial sync, sleeping for 1 second.") time.Sleep(1 * time.Second) } @@ -211,7 +209,7 @@ func (n *NGINXController) syncIngress(interface{}) error { } err := wait.ExponentialBackoff(retry, func() (bool, error) { - err := configureDynamically(pcfg, n.cfg.ListenPorts.Status, n.cfg.DynamicCertificatesEnabled) + err := configureDynamically(pcfg, n.cfg.DynamicCertificatesEnabled) if err == nil { klog.V(2).Infof("Dynamic reconfiguration succeeded.") return true, nil @@ -255,7 +253,6 @@ func (n *NGINXController) getStreamServices(configmapName string, proto apiv1.Pr n.cfg.ListenPorts.HTTP, n.cfg.ListenPorts.HTTPS, n.cfg.ListenPorts.SSLProxy, - n.cfg.ListenPorts.Status, n.cfg.ListenPorts.Health, n.cfg.ListenPorts.Default, } @@ -488,6 +485,7 @@ func (n *NGINXController) getBackendServers(ingresses []*ingress.Ingress) ([]*in loc.BackendProtocol = anns.BackendProtocol loc.CustomHTTPErrors = anns.CustomHTTPErrors loc.ModSecurity = anns.ModSecurity + loc.Satisfy = anns.Satisfy if loc.Redirect.FromToWWW { server.RedirectFromToWWW = true @@ -530,6 +528,7 @@ func (n *NGINXController) getBackendServers(ingresses []*ingress.Ingress) ([]*in BackendProtocol: anns.BackendProtocol, CustomHTTPErrors: anns.CustomHTTPErrors, ModSecurity: anns.ModSecurity, + Satisfy: anns.Satisfy, } if loc.Redirect.FromToWWW { @@ -583,24 +582,25 @@ func (n *NGINXController) 
getBackendServers(ingresses []*ingress.Ingress) ([]*in isHTTPSfrom := []*ingress.Server{} for _, server := range servers { for _, location := range server.Locations { - if upstream.Name == location.Backend { - if len(upstream.Endpoints) == 0 { - klog.V(3).Infof("Upstream %q has no active Endpoint", upstream.Name) - // check if the location contains endpoints and a custom default backend - if location.DefaultBackend != nil { - sp := location.DefaultBackend.Spec.Ports[0] - endps := getEndpoints(location.DefaultBackend, &sp, apiv1.ProtocolTCP, n.store.GetServiceEndpoints) - if len(endps) > 0 { - klog.V(3).Infof("Using custom default backend for location %q in server %q (Service \"%v/%v\")", - location.Path, server.Hostname, location.DefaultBackend.Namespace, location.DefaultBackend.Name) - - nb := upstream.DeepCopy() - name := fmt.Sprintf("custom-default-backend-%v", upstream.Name) - nb.Name = name - nb.Endpoints = endps - aUpstreams = append(aUpstreams, nb) - location.Backend = name - } + if shouldCreateUpstreamForLocationDefaultBackend(upstream, location) { + sp := location.DefaultBackend.Spec.Ports[0] + endps := getEndpoints(location.DefaultBackend, &sp, apiv1.ProtocolTCP, n.store.GetServiceEndpoints) + if len(endps) > 0 { + + name := fmt.Sprintf("custom-default-backend-%v", location.DefaultBackend.GetName()) + klog.V(3).Infof("Creating \"%v\" upstream based on default backend annotation", name) + + nb := upstream.DeepCopy() + nb.Name = name + nb.Endpoints = endps + aUpstreams = append(aUpstreams, nb) + location.DefaultBackendUpstreamName = name + + if len(upstream.Endpoints) == 0 { + klog.V(3).Infof("Upstream %q has no active Endpoint, so using custom default backend for location %q in server %q (Service \"%v/%v\")", + upstream.Name, location.Path, server.Hostname, location.DefaultBackend.Namespace, location.DefaultBackend.Name) + + location.Backend = name } } @@ -613,6 +613,8 @@ func (n *NGINXController) getBackendServers(ingresses []*ingress.Ingress) ([]*in 
isHTTPSfrom = append(isHTTPSfrom, server) } } + } else { + location.DefaultBackendUpstreamName = "upstream-default-backend" } } } @@ -690,9 +692,10 @@ func (n *NGINXController) createUpstreams(data []*ingress.Ingress, du *ingress.B if anns.Canary.Enabled { upstreams[defBackend].NoServer = true upstreams[defBackend].TrafficShapingPolicy = ingress.TrafficShapingPolicy{ - Weight: anns.Canary.Weight, - Header: anns.Canary.Header, - Cookie: anns.Canary.Cookie, + Weight: anns.Canary.Weight, + Header: anns.Canary.Header, + HeaderValue: anns.Canary.HeaderValue, + Cookie: anns.Canary.Cookie, } } @@ -757,9 +760,10 @@ func (n *NGINXController) createUpstreams(data []*ingress.Ingress, du *ingress.B if anns.Canary.Enabled { upstreams[name].NoServer = true upstreams[name].TrafficShapingPolicy = ingress.TrafficShapingPolicy{ - Weight: anns.Canary.Weight, - Header: anns.Canary.Header, - Cookie: anns.Canary.Cookie, + Weight: anns.Canary.Weight, + Header: anns.Canary.Header, + HeaderValue: anns.Canary.HeaderValue, + Cookie: anns.Canary.Cookie, } } @@ -889,6 +893,7 @@ func (n *NGINXController) createServers(data []*ingress.Ingress, ConnectTimeout: bdef.ProxyConnectTimeout, SendTimeout: bdef.ProxySendTimeout, ReadTimeout: bdef.ProxyReadTimeout, + BuffersNumber: bdef.ProxyBuffersNumber, BufferSize: bdef.ProxyBufferSize, CookieDomain: bdef.ProxyCookieDomain, CookiePath: bdef.ProxyCookiePath, @@ -924,6 +929,10 @@ func (n *NGINXController) createServers(data []*ingress.Ingress, Backend: du.Name, Proxy: ngxProxy, Service: du.Service, + Logs: log.Config{ + Access: n.store.GetBackendConfiguration().EnableAccessLogForDefaultBackend, + Rewrite: false, + }, }, }} @@ -950,7 +959,7 @@ func (n *NGINXController) createServers(data []*ingress.Ingress, // special "catch all" case, Ingress with a backend but no rule defLoc := servers[defServerName].Locations[0] if defLoc.IsDefBackend && len(ing.Spec.Rules) == 0 { - klog.Infof("Ingress %q defines a backend but no rule. 
Using it to configure the catch-all server %q", + klog.V(2).Infof("Ingress %q defines a backend but no rule. Using it to configure the catch-all server %q", ingKey, defServerName) defLoc.IsDefBackend = false @@ -998,11 +1007,32 @@ func (n *NGINXController) createServers(data []*ingress.Ingress, Hostname: host, Locations: []*ingress.Location{ { - Path: rootLocation, - IsDefBackend: true, - Backend: un, - Proxy: ngxProxy, - Service: &apiv1.Service{}, + Path: rootLocation, + IsDefBackend: true, + Backend: un, + Service: &apiv1.Service{}, + BasicDigestAuth: anns.BasicDigestAuth, + ClientBodyBufferSize: anns.ClientBodyBufferSize, + ConfigurationSnippet: anns.ConfigurationSnippet, + CorsConfig: anns.CorsConfig, + ExternalAuth: anns.ExternalAuth, + Proxy: anns.Proxy, + RateLimit: anns.RateLimit, + Redirect: anns.Redirect, + Rewrite: anns.Rewrite, + UpstreamVhost: anns.UpstreamVhost, + Whitelist: anns.Whitelist, + Denied: anns.Denied, + XForwardedPrefix: anns.XForwardedPrefix, + UsePortInRedirects: anns.UsePortInRedirects, + Connection: anns.Connection, + Logs: anns.Logs, + LuaRestyWAF: anns.LuaRestyWAF, + InfluxDB: anns.InfluxDB, + DefaultBackend: anns.DefaultBackend, + BackendProtocol: anns.BackendProtocol, + CustomHTTPErrors: anns.CustomHTTPErrors, + ModSecurity: anns.ModSecurity, }, }, SSLPassthrough: anns.SSLPassthrough, @@ -1175,7 +1205,7 @@ func mergeAlternativeBackends(ing *ingress.Ingress, upstreams map[string]*ingres priUps := upstreams[loc.Backend] if canMergeBackend(priUps, altUps) { - klog.Infof("matching backend %v found for alternative backend %v", + klog.V(2).Infof("matching backend %v found for alternative backend %v", priUps.Name, altUps.Name) merged = mergeAlternativeBackend(priUps, altUps) @@ -1210,7 +1240,7 @@ func mergeAlternativeBackends(ing *ingress.Ingress, upstreams map[string]*ingres priUps := upstreams[loc.Backend] if canMergeBackend(priUps, altUps) && loc.Path == path.Path { - klog.Infof("matching backend %v found for alternative backend %v", + 
klog.V(2).Infof("matching backend %v found for alternative backend %v", priUps.Name, altUps.Name) merged = mergeAlternativeBackend(priUps, altUps) @@ -1325,3 +1355,10 @@ func getRemovedIngresses(rucfg, newcfg *ingress.Configuration) []string { return oldIngresses.Difference(newIngresses).List() } + +// checks conditions for whether or not an upstream should be created for a custom default backend +func shouldCreateUpstreamForLocationDefaultBackend(upstream *ingress.Backend, location *ingress.Location) bool { + return (upstream.Name == location.Backend) && + (len(upstream.Endpoints) == 0 || len(location.CustomHTTPErrors) != 0) && + location.DefaultBackend != nil +} diff --git a/internal/ingress/controller/nginx.go b/internal/ingress/controller/nginx.go index 6ce3fb3cd2..b35c2912fe 100644 --- a/internal/ingress/controller/nginx.go +++ b/internal/ingress/controller/nginx.go @@ -22,6 +22,7 @@ import ( "errors" "fmt" "io/ioutil" + "math" "net" "net/http" "os" @@ -48,7 +49,6 @@ import ( "k8s.io/ingress-nginx/internal/file" "k8s.io/ingress-nginx/internal/ingress" - "k8s.io/ingress-nginx/internal/ingress/annotations" "k8s.io/ingress-nginx/internal/ingress/annotations/class" ngx_config "k8s.io/ingress-nginx/internal/ingress/controller/config" "k8s.io/ingress-nginx/internal/ingress/controller/process" @@ -60,14 +60,13 @@ import ( ing_net "k8s.io/ingress-nginx/internal/net" "k8s.io/ingress-nginx/internal/net/dns" "k8s.io/ingress-nginx/internal/net/ssl" + "k8s.io/ingress-nginx/internal/nginx" "k8s.io/ingress-nginx/internal/task" "k8s.io/ingress-nginx/internal/watch" ) const ( - ngxHealthPath = "/healthz" - nginxStreamSocket = "/tmp/ingress-stream.sock" - tempNginxPattern = "nginx-cfg" + tempNginxPattern = "nginx-cfg" ) var ( @@ -133,9 +132,6 @@ func NewNGINXController(config *Configuration, mc metric.Collector, fs file.File config.DisableCatchAll) n.syncQueue = task.NewTaskQueue(n.syncIngress) - - n.annotations = annotations.NewAnnotationExtractor(n.store) - if 
config.UpdateStatus { n.syncStatus = status.NewStatusSyncer(status.Config{ Client: config.Client, @@ -221,8 +217,6 @@ Error loading new template: %v type NGINXController struct { cfg *Configuration - annotations annotations.Extractor - recorder record.EventRecorder syncQueue *task.Queue @@ -527,7 +521,7 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error { if err != nil { wp = 1 } - maxOpenFiles := (sysctlFSFileMax() / wp) - 1024 + maxOpenFiles := (rlimitMaxNumFiles() / wp) - 1024 klog.V(3).Infof("Maximum number of open file descriptors: %d", maxOpenFiles) if maxOpenFiles < 1024 { // this means the value of RLIMIT_NOFILE is too low. @@ -538,8 +532,9 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error { } if cfg.MaxWorkerConnections == 0 { - klog.V(3).Infof("Adjusting MaxWorkerConnections variable to %d", cfg.MaxWorkerOpenFiles) - cfg.MaxWorkerConnections = cfg.MaxWorkerOpenFiles + maxWorkerConnections := int(math.Ceil(float64(cfg.MaxWorkerOpenFiles * 3.0 / 4))) + klog.V(3).Infof("Adjusting MaxWorkerConnections variable to %d", maxWorkerConnections) + cfg.MaxWorkerConnections = maxWorkerConnections } setHeaders := map[string]string{} @@ -595,8 +590,6 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error { Servers: ingressCfg.Servers, TCPBackends: ingressCfg.TCPEndpoints, UDPBackends: ingressCfg.UDPEndpoints, - HealthzURI: ngxHealthPath, - CustomErrors: len(cfg.CustomHTTPErrors) > 0, Cfg: cfg, IsIPV6Enabled: n.isIPV6Enabled && !cfg.DisableIpv6, NginxStatusIpv4Whitelist: cfg.NginxStatusIpv4Whitelist, @@ -607,6 +600,12 @@ func (n *NGINXController) OnUpdate(ingressCfg ingress.Configuration) error { PublishService: n.GetPublishService(), DynamicCertificatesEnabled: n.cfg.DynamicCertificatesEnabled, EnableMetrics: n.cfg.EnableMetrics, + + HealthzURI: nginx.HealthPath, + PID: nginx.PID, + StatusSocket: nginx.StatusSocket, + StatusPath: nginx.StatusPath, + StreamSocket: nginx.StreamSocket, } 
tc.Cfg.Checksum = ingressCfg.ConfigurationChecksum @@ -751,6 +750,33 @@ func clearCertificates(config *ingress.Configuration) { config.Servers = clearedServers } +// Helper function to clear endpoints from the ingress configuration since they should be ignored when +// checking if the new configuration changes can be applied dynamically. +func clearL4serviceEndpoints(config *ingress.Configuration) { + var clearedTCPL4Services []ingress.L4Service + var clearedUDPL4Services []ingress.L4Service + for _, service := range config.TCPEndpoints { + copyofService := ingress.L4Service{ + Port: service.Port, + Backend: service.Backend, + Endpoints: []ingress.Endpoint{}, + Service: nil, + } + clearedTCPL4Services = append(clearedTCPL4Services, copyofService) + } + for _, service := range config.UDPEndpoints { + copyofService := ingress.L4Service{ + Port: service.Port, + Backend: service.Backend, + Endpoints: []ingress.Endpoint{}, + Service: nil, + } + clearedUDPL4Services = append(clearedUDPL4Services, copyofService) + } + config.TCPEndpoints = clearedTCPL4Services + config.UDPEndpoints = clearedUDPL4Services +} + // IsDynamicConfigurationEnough returns whether a Configuration can be // dynamically applied, without reloading the backend. func (n *NGINXController) IsDynamicConfigurationEnough(pcfg *ingress.Configuration) bool { @@ -759,6 +785,10 @@ func (n *NGINXController) IsDynamicConfigurationEnough(pcfg *ingress.Configurati copyOfRunningConfig.Backends = []*ingress.Backend{} copyOfPcfg.Backends = []*ingress.Backend{} + + clearL4serviceEndpoints(©OfRunningConfig) + clearL4serviceEndpoints(©OfPcfg) + copyOfRunningConfig.ControllerPodsCount = 0 copyOfPcfg.ControllerPodsCount = 0 @@ -772,7 +802,7 @@ func (n *NGINXController) IsDynamicConfigurationEnough(pcfg *ingress.Configurati // configureDynamically encodes new Backends in JSON format and POSTs the // payload to an internal HTTP endpoint handled by Lua. 
-func configureDynamically(pcfg *ingress.Configuration, port int, isDynamicCertificatesEnabled bool) error { +func configureDynamically(pcfg *ingress.Configuration, isDynamicCertificatesEnabled bool) error { backends := make([]*ingress.Backend, len(pcfg.Backends)) for i, backend := range pcfg.Backends { @@ -805,12 +835,15 @@ func configureDynamically(pcfg *ingress.Configuration, port int, isDynamicCertif backends[i] = luaBackend } - url := fmt.Sprintf("http://localhost:%d/configuration/backends", port) - err := post(url, backends) + statusCode, _, err := nginx.NewPostStatusRequest("/configuration/backends", "application/json", backends) if err != nil { return err } + if statusCode != http.StatusCreated { + return fmt.Errorf("unexpected error code: %d", statusCode) + } + streams := make([]ingress.Backend, 0) for _, ep := range pcfg.TCPEndpoints { var service *apiv1.Service @@ -846,16 +879,19 @@ func configureDynamically(pcfg *ingress.Configuration, port int, isDynamicCertif return err } - url = fmt.Sprintf("http://localhost:%d/configuration/general", port) - err = post(url, &ingress.GeneralConfig{ + statusCode, _, err = nginx.NewPostStatusRequest("/configuration/general", "application/json", ingress.GeneralConfig{ ControllerPodsCount: pcfg.ControllerPodsCount, }) if err != nil { return err } + if statusCode != http.StatusCreated { + return fmt.Errorf("unexpected error code: %d", statusCode) + } + if isDynamicCertificatesEnabled { - err = configureCertificates(pcfg, port) + err = configureCertificates(pcfg) if err != nil { return err } @@ -865,7 +901,7 @@ func configureDynamically(pcfg *ingress.Configuration, port int, isDynamicCertif } func updateStreamConfiguration(streams []ingress.Backend) error { - conn, err := net.Dial("unix", nginxStreamSocket) + conn, err := net.Dial("unix", nginx.StreamSocket) if err != nil { return err } @@ -890,7 +926,7 @@ func updateStreamConfiguration(streams []ingress.Backend) error { // configureCertificates JSON encodes certificates 
and POSTs it to an internal HTTP endpoint // that is handled by Lua -func configureCertificates(pcfg *ingress.Configuration, port int) error { +func configureCertificates(pcfg *ingress.Configuration) error { var servers []*ingress.Server for _, server := range pcfg.Servers { @@ -902,30 +938,13 @@ func configureCertificates(pcfg *ingress.Configuration, port int) error { }) } - url := fmt.Sprintf("http://localhost:%d/configuration/servers", port) - return post(url, servers) -} - -func post(url string, data interface{}) error { - buf, err := json.Marshal(data) + statusCode, _, err := nginx.NewPostStatusRequest("/configuration/servers", "application/json", servers) if err != nil { return err } - klog.V(2).Infof("Posting to %s", url) - resp, err := http.Post(url, "application/json", bytes.NewReader(buf)) - if err != nil { - return err - } - - defer func() { - if err := resp.Body.Close(); err != nil { - klog.Warningf("Error while closing response body:\n%v", err) - } - }() - - if resp.StatusCode != http.StatusCreated { - return fmt.Errorf("unexpected error code: %d", resp.StatusCode) + if statusCode != http.StatusCreated { + return fmt.Errorf("unexpected error code: %d", statusCode) } return nil @@ -949,6 +968,13 @@ const jaegerTmpl = `{ } }` +const datadogTmpl = `{ + "service": "{{ .DatadogServiceName }}", + "agent_host": "{{ .DatadogCollectorHost }}", + "agent_port": {{ .DatadogCollectorPort }}, + "operation_name_override": "{{ .DatadogOperationNameOverride }}" +}` + func createOpentracingCfg(cfg ngx_config.Configuration) error { var tmpl *template.Template var err error @@ -959,7 +985,12 @@ func createOpentracingCfg(cfg ngx_config.Configuration) error { return err } } else if cfg.JaegerCollectorHost != "" { - tmpl, err = template.New("jarger").Parse(jaegerTmpl) + tmpl, err = template.New("jaeger").Parse(jaegerTmpl) + if err != nil { + return err + } + } else if cfg.DatadogCollectorHost != "" { + tmpl, err = template.New("datadog").Parse(datadogTmpl) if err != nil { 
return err } @@ -973,7 +1004,10 @@ func createOpentracingCfg(cfg ngx_config.Configuration) error { return err } - return ioutil.WriteFile("/etc/nginx/opentracing.json", tmplBuf.Bytes(), file.ReadWriteByUser) + // Expand possible environment variables before writing the configuration to file. + expanded := os.ExpandEnv(string(tmplBuf.Bytes())) + + return ioutil.WriteFile("/etc/nginx/opentracing.json", []byte(expanded), file.ReadWriteByUser) } func cleanTempNginxCfg() error { diff --git a/internal/ingress/controller/nginx_test.go b/internal/ingress/controller/nginx_test.go index 3d17981a24..1de35f97db 100644 --- a/internal/ingress/controller/nginx_test.go +++ b/internal/ingress/controller/nginx_test.go @@ -32,6 +32,7 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/ingress-nginx/internal/ingress" + "k8s.io/ingress-nginx/internal/nginx" ) func TestIsDynamicConfigurationEnough(t *testing.T) { @@ -149,32 +150,62 @@ func TestIsDynamicConfigurationEnough(t *testing.T) { } } -func mockUnixSocket(t *testing.T) net.Listener { - l, err := net.Listen("unix", nginxStreamSocket) +func TestConfigureDynamically(t *testing.T) { + listener, err := net.Listen("unix", nginx.StatusSocket) if err != nil { - t.Fatalf("unexpected error creating unix socket: %v", err) + t.Errorf("crating unix listener: %s", err) } - if l == nil { - t.Fatalf("expected a listener but none returned") + defer listener.Close() + defer os.Remove(nginx.StatusSocket) + + streamListener, err := net.Listen("unix", nginx.StreamSocket) + if err != nil { + t.Errorf("crating unix listener: %s", err) } + defer streamListener.Close() + defer os.Remove(nginx.StreamSocket) - go func() { - for { - conn, err := l.Accept() - if err != nil { - continue - } + server := &httptest.Server{ + Listener: listener, + Config: &http.Server{ + Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusCreated) - time.Sleep(100 * time.Millisecond) - defer conn.Close() - } - }() + if r.Method != "POST" 
{ + t.Errorf("expected a 'POST' request, got '%s'", r.Method) + } - return l -} -func TestConfigureDynamically(t *testing.T) { - l := mockUnixSocket(t) - defer l.Close() + b, err := ioutil.ReadAll(r.Body) + if err != nil && err != io.EOF { + t.Fatal(err) + } + body := string(b) + + switch r.URL.Path { + case "/configuration/backends": + { + if strings.Contains(body, "target") { + t.Errorf("unexpected target reference in JSON content: %v", body) + } + + if !strings.Contains(body, "service") { + t.Errorf("service reference should be present in JSON content: %v", body) + } + } + case "/configuration/general": + { + if !strings.Contains(body, "controllerPodsCount") { + t.Errorf("controllerPodsCount should be present in JSON content: %v", body) + } + } + default: + t.Errorf("unknown request to %s", r.URL.Path) + } + }), + }, + } + defer server.Close() + server.Start() target := &apiv1.ObjectReference{} @@ -212,46 +243,7 @@ func TestConfigureDynamically(t *testing.T) { ControllerPodsCount: 2, } - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusCreated) - - if r.Method != "POST" { - t.Errorf("expected a 'POST' request, got '%s'", r.Method) - } - - b, err := ioutil.ReadAll(r.Body) - if err != nil && err != io.EOF { - t.Fatal(err) - } - body := string(b) - - switch r.URL.Path { - case "/configuration/backends": - { - if strings.Contains(body, "target") { - t.Errorf("unexpected target reference in JSON content: %v", body) - } - - if !strings.Contains(body, "service") { - t.Errorf("service reference should be present in JSON content: %v", body) - } - } - case "/configuration/general": - { - if !strings.Contains(body, "controllerPodsCount") { - t.Errorf("controllerPodsCount should be present in JSON content: %v", body) - } - } - default: - t.Errorf("unknown request to %s", r.URL.Path) - } - - })) - - port := ts.Listener.Addr().(*net.TCPAddr).Port - defer ts.Close() - - err := configureDynamically(commonConfig, 
port, false) + err = configureDynamically(commonConfig, false) if err != nil { t.Errorf("unexpected error posting dynamic configuration: %v", err) } @@ -262,6 +254,19 @@ func TestConfigureDynamically(t *testing.T) { } func TestConfigureCertificates(t *testing.T) { + listener, err := net.Listen("unix", nginx.StatusSocket) + if err != nil { + t.Errorf("crating unix listener: %s", err) + } + defer listener.Close() + defer os.Remove(nginx.StatusSocket) + + streamListener, err := net.Listen("unix", nginx.StreamSocket) + if err != nil { + t.Errorf("crating unix listener: %s", err) + } + defer streamListener.Close() + defer os.Remove(nginx.StreamSocket) servers := []*ingress.Server{{ Hostname: "myapp.fake", @@ -270,42 +275,46 @@ func TestConfigureCertificates(t *testing.T) { }, }} - commonConfig := &ingress.Configuration{ - Servers: servers, - } - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusCreated) + server := &httptest.Server{ + Listener: listener, + Config: &http.Server{ + Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusCreated) - if r.Method != "POST" { - t.Errorf("expected a 'POST' request, got '%s'", r.Method) - } + if r.Method != "POST" { + t.Errorf("expected a 'POST' request, got '%s'", r.Method) + } - b, err := ioutil.ReadAll(r.Body) - if err != nil && err != io.EOF { - t.Fatal(err) - } - var postedServers []ingress.Server - err = jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal(b, &postedServers) - if err != nil { - t.Fatal(err) - } + b, err := ioutil.ReadAll(r.Body) + if err != nil && err != io.EOF { + t.Fatal(err) + } + var postedServers []ingress.Server + err = jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal(b, &postedServers) + if err != nil { + t.Fatal(err) + } - if len(servers) != len(postedServers) { - t.Errorf("Expected servers to be the same length as the posted servers") - } + if len(servers) != len(postedServers) { 
+ t.Errorf("Expected servers to be the same length as the posted servers") + } - for i, server := range servers { - if !server.Equal(&postedServers[i]) { - t.Errorf("Expected servers and posted servers to be equal") - } - } - })) + for i, server := range servers { + if !server.Equal(&postedServers[i]) { + t.Errorf("Expected servers and posted servers to be equal") + } + } + }), + }, + } + defer server.Close() + server.Start() - port := ts.Listener.Addr().(*net.TCPAddr).Port - defer ts.Close() + commonConfig := &ingress.Configuration{ + Servers: servers, + } - err := configureCertificates(commonConfig, port) + err = configureCertificates(commonConfig) if err != nil { t.Errorf("unexpected error posting dynamic certificate configuration: %v", err) } diff --git a/internal/ingress/controller/store/store.go b/internal/ingress/controller/store/store.go index a6863d31da..e537b1894e 100644 --- a/internal/ingress/controller/store/store.go +++ b/internal/ingress/controller/store/store.go @@ -323,7 +323,7 @@ func New(checkOCSP bool, klog.Infof("ignoring delete for ingress %v based on annotation %v", ing.Name, class.IngressKey) return } - if ing.Spec.Backend != nil && disableCatchAll { + if isCatchAllIngress(ing.Spec) && disableCatchAll { klog.Infof("ignoring delete for catch-all ingress %v/%v because of --disable-catch-all", ing.Namespace, ing.Name) return } @@ -348,7 +348,7 @@ func New(checkOCSP bool, klog.Infof("ignoring add for ingress %v based on annotation %v with value %v", ing.Name, class.IngressKey, a) return } - if ing.Spec.Backend != nil && disableCatchAll { + if isCatchAllIngress(ing.Spec) && disableCatchAll { klog.Infof("ignoring add for catch-all ingress %v/%v because of --disable-catch-all", ing.Namespace, ing.Name) return } @@ -370,7 +370,7 @@ func New(checkOCSP bool, validOld := class.IsValid(oldIng) validCur := class.IsValid(curIng) if !validOld && validCur { - if curIng.Spec.Backend != nil && disableCatchAll { + if isCatchAllIngress(curIng.Spec) && 
disableCatchAll { klog.Infof("ignoring update for catch-all ingress %v/%v because of --disable-catch-all", curIng.Namespace, curIng.Name) return } @@ -382,7 +382,7 @@ func New(checkOCSP bool, ingDeleteHandler(old) return } else if validCur && !reflect.DeepEqual(old, cur) { - if curIng.Spec.Backend != nil && disableCatchAll { + if isCatchAllIngress(curIng.Spec) && disableCatchAll { klog.Infof("ignoring update for catch-all ingress %v/%v and delete old one because of --disable-catch-all", curIng.Namespace, curIng.Name) ingDeleteHandler(old) return @@ -390,7 +390,7 @@ func New(checkOCSP bool, recorder.Eventf(curIng, corev1.EventTypeNormal, "UPDATE", fmt.Sprintf("Ingress %s/%s", curIng.Namespace, curIng.Name)) } else { - klog.Infof("ignoring ingress %v based on annotation %v", curIng.Name, class.IngressKey) + klog.V(3).Infof("No changes on ingress %v/%v. Skipping update", curIng.Namespace, curIng.Name) return } @@ -617,6 +617,12 @@ func New(checkOCSP bool, return store } +// isCatchAllIngress returns whether or not an ingress produces a +// catch-all server, and so should be ignored when --disable-catch-all is set +func isCatchAllIngress(spec extensions.IngressSpec) bool { + return spec.Backend != nil && len(spec.Rules) == 0 +} + // syncIngress parses ingress annotations converting the value of the // annotation to a go struct func (s *k8sStore) syncIngress(ing *extensions.Ingress) { diff --git a/internal/ingress/controller/store/store_test.go b/internal/ingress/controller/store/store_test.go index 91afd26de7..584296757a 100644 --- a/internal/ingress/controller/store/store_test.go +++ b/internal/ingress/controller/store/store_test.go @@ -945,8 +945,6 @@ func TestUpdateSecretIngressMap(t *testing.T) { func TestListIngresses(t *testing.T) { s := newStore(t) - sameTime := metav1.NewTime(time.Now()) - ingressToIgnore := &ingress.Ingress{ Ingress: extensions.Ingress{ ObjectMeta: metav1.ObjectMeta{ @@ -955,7 +953,7 @@ func TestListIngresses(t *testing.T) { Annotations: 
map[string]string{ "kubernetes.io/ingress.class": "something", }, - CreationTimestamp: sameTime, + CreationTimestamp: metav1.NewTime(time.Now()), }, Spec: extensions.IngressSpec{ Backend: &extensions.IngressBackend{ @@ -972,7 +970,7 @@ func TestListIngresses(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "test-3", Namespace: "testns", - CreationTimestamp: sameTime, + CreationTimestamp: metav1.NewTime(time.Now()), }, Spec: extensions.IngressSpec{ Rules: []extensions.IngressRule{ diff --git a/internal/ingress/controller/template/configmap_test.go b/internal/ingress/controller/template/configmap_test.go index e9c8a0ba40..328169bdcf 100644 --- a/internal/ingress/controller/template/configmap_test.go +++ b/internal/ingress/controller/template/configmap_test.go @@ -59,6 +59,7 @@ func TestMergeConfigMapToStruct(t *testing.T) { "skip-access-log-urls": "/log,/demo,/test", "use-proxy-protocol": "true", "disable-access-log": "true", + "access-log-params": "buffer=4k gzip", "access-log-path": "/var/log/test/access.log", "error-log-path": "/var/log/test/error.log", "use-gzip": "true", @@ -75,6 +76,7 @@ func TestMergeConfigMapToStruct(t *testing.T) { def := config.NewDefault() def.CustomHTTPErrors = []int{300, 400} def.DisableAccessLog = true + def.AccessLogParams = "buffer=4k gzip" def.AccessLogPath = "/var/log/test/access.log" def.ErrorLogPath = "/var/log/test/error.log" def.SkipAccessLogURLs = []string{"/log", "/demo", "/test"} diff --git a/internal/ingress/controller/template/template.go b/internal/ingress/controller/template/template.go index 5386b26c5c..e796ab2b14 100644 --- a/internal/ingress/controller/template/template.go +++ b/internal/ingress/controller/template/template.go @@ -28,6 +28,7 @@ import ( "os/exec" "reflect" "regexp" + "sort" "strings" text_template "text/template" "time" @@ -150,17 +151,17 @@ var ( "serverConfig": func(all config.TemplateConfig, server *ingress.Server) interface{} { return struct{ First, Second interface{} }{all, server} }, - 
"isValidByteSize": isValidByteSize, - "buildForwardedFor": buildForwardedFor, - "buildAuthSignURL": buildAuthSignURL, - "buildOpentracing": buildOpentracing, - "proxySetHeader": proxySetHeader, - "buildInfluxDB": buildInfluxDB, - "enforceRegexModifier": enforceRegexModifier, - "stripLocationModifer": stripLocationModifer, - "buildCustomErrorDeps": buildCustomErrorDeps, - "collectCustomErrorsPerServer": collectCustomErrorsPerServer, - "opentracingPropagateContext": opentracingPropagateContext, + "isValidByteSize": isValidByteSize, + "buildForwardedFor": buildForwardedFor, + "buildAuthSignURL": buildAuthSignURL, + "buildOpentracing": buildOpentracing, + "proxySetHeader": proxySetHeader, + "buildInfluxDB": buildInfluxDB, + "enforceRegexModifier": enforceRegexModifier, + "stripLocationModifer": stripLocationModifer, + "buildCustomErrorDeps": buildCustomErrorDeps, + "opentracingPropagateContext": opentracingPropagateContext, + "buildCustomErrorLocationsPerServer": buildCustomErrorLocationsPerServer, } ) @@ -210,8 +211,6 @@ func buildLuaSharedDictionaries(s interface{}, disableLuaRestyWAF bool) string { out := []string{ "lua_shared_dict configuration_data 5M", "lua_shared_dict certificate_data 16M", - "lua_shared_dict locks 512k", - "lua_shared_dict sticky_sessions 1M", } if !disableLuaRestyWAF { @@ -230,9 +229,6 @@ func buildLuaSharedDictionaries(s interface{}, disableLuaRestyWAF bool) string { } } - if len(out) == 0 { - return "" - } return strings.Join(out, ";\n\r") + ";" } @@ -840,7 +836,9 @@ func buildOpentracing(input interface{}) string { if cfg.ZipkinCollectorHost != "" { buf.WriteString("opentracing_load_tracer /usr/local/lib/libzipkin_opentracing.so /etc/nginx/opentracing.json;") } else if cfg.JaegerCollectorHost != "" { - buf.WriteString("opentracing_load_tracer /usr/local/lib/libjaegertracing_plugin.so /etc/nginx/opentracing.json;") + buf.WriteString("opentracing_load_tracer /usr/local/lib/libjaegertracing_plugin.so /etc/nginx/opentracing.json;") + } else if 
cfg.DatadogCollectorHost != "" { + buf.WriteString("opentracing_load_tracer /usr/local/lib/libdd_opentracing.so /etc/nginx/opentracing.json;") } buf.WriteString("\r\n") @@ -887,41 +885,72 @@ func proxySetHeader(loc interface{}) string { // buildCustomErrorDeps is a utility function returning a struct wrapper with // the data required to build the 'CUSTOM_ERRORS' template -func buildCustomErrorDeps(proxySetHeaders map[string]string, errorCodes []int, enableMetrics bool) interface{} { +func buildCustomErrorDeps(upstreamName string, errorCodes []int, enableMetrics bool) interface{} { return struct { - ProxySetHeaders map[string]string - ErrorCodes []int - EnableMetrics bool + UpstreamName string + ErrorCodes []int + EnableMetrics bool }{ - ProxySetHeaders: proxySetHeaders, - ErrorCodes: errorCodes, - EnableMetrics: enableMetrics, + UpstreamName: upstreamName, + ErrorCodes: errorCodes, + EnableMetrics: enableMetrics, } } -// collectCustomErrorsPerServer is a utility function which will collect all +type errorLocation struct { + UpstreamName string + Codes []int +} + +// buildCustomErrorLocationsPerServer is a utility function which will collect all // custom error codes for all locations of a server block, deduplicates them, -// and returns a unique set (for the template to create @custom_xxx locations) -func collectCustomErrorsPerServer(input interface{}) []int { +// and returns a set which is unique by default-upstream and error code. 
It returns an array +// of errorLocations, each of which contain the upstream name and a list of +// error codes for that given upstream, so that sufficiently unique +// @custom error location blocks can be created in the template +func buildCustomErrorLocationsPerServer(input interface{}) interface{} { server, ok := input.(*ingress.Server) if !ok { klog.Errorf("expected a '*ingress.Server' type but %T was returned", input) return nil } - codesMap := make(map[int]bool) + codesMap := make(map[string]map[int]bool) for _, loc := range server.Locations { + backendUpstream := loc.DefaultBackendUpstreamName + + var dedupedCodes map[int]bool + if existingMap, ok := codesMap[backendUpstream]; ok { + dedupedCodes = existingMap + } else { + dedupedCodes = make(map[int]bool) + } + for _, code := range loc.CustomHTTPErrors { - codesMap[code] = true + dedupedCodes[code] = true } + codesMap[backendUpstream] = dedupedCodes } - uniqueCodes := make([]int, 0, len(codesMap)) - for key := range codesMap { - uniqueCodes = append(uniqueCodes, key) + errorLocations := []errorLocation{} + + for upstream, dedupedCodes := range codesMap { + codesForUpstream := []int{} + for code := range dedupedCodes { + codesForUpstream = append(codesForUpstream, code) + } + sort.Ints(codesForUpstream) + errorLocations = append(errorLocations, errorLocation{ + UpstreamName: upstream, + Codes: codesForUpstream, + }) } - return uniqueCodes + sort.Slice(errorLocations, func(i, j int) bool { + return errorLocations[i].UpstreamName < errorLocations[j].UpstreamName + }) + + return errorLocations } func opentracingPropagateContext(loc interface{}) string { diff --git a/internal/ingress/controller/template/template_test.go b/internal/ingress/controller/template/template_test.go index a1fbf642fa..2afcc60904 100644 --- a/internal/ingress/controller/template/template_test.go +++ b/internal/ingress/controller/template/template_test.go @@ -33,7 +33,9 @@ import ( "k8s.io/ingress-nginx/internal/file" 
"k8s.io/ingress-nginx/internal/ingress" "k8s.io/ingress-nginx/internal/ingress/annotations/authreq" + "k8s.io/ingress-nginx/internal/ingress/annotations/influxdb" "k8s.io/ingress-nginx/internal/ingress/annotations/luarestywaf" + "k8s.io/ingress-nginx/internal/ingress/annotations/ratelimit" "k8s.io/ingress-nginx/internal/ingress/annotations/rewrite" "k8s.io/ingress-nginx/internal/ingress/controller/config" ) @@ -163,6 +165,14 @@ proxy_pass http://upstream_balancer;`, ) func TestBuildLuaSharedDictionaries(t *testing.T) { + invalidType := &ingress.Ingress{} + expected := "" + actual := buildLuaSharedDictionaries(invalidType, true) + + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + servers := []*ingress.Server{ { Hostname: "foo.bar", @@ -209,6 +219,14 @@ func TestFormatIP(t *testing.T) { } func TestBuildLocation(t *testing.T) { + invalidType := &ingress.Ingress{} + expected := "/" + actual := buildLocation(invalidType, true) + + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + for k, tc := range tmplFuncTestcases { loc := &ingress.Location{ Path: tc.Path, @@ -263,6 +281,14 @@ func TestBuildProxyPass(t *testing.T) { } func TestBuildAuthLocation(t *testing.T) { + invalidType := &ingress.Ingress{} + expected := "" + actual := buildAuthLocation(invalidType) + + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + authURL := "foo.com/auth" loc := &ingress.Location{ @@ -275,7 +301,7 @@ func TestBuildAuthLocation(t *testing.T) { str := buildAuthLocation(loc) encodedAuthURL := strings.Replace(base64.URLEncoding.EncodeToString([]byte(loc.Path)), "=", "", -1) - expected := fmt.Sprintf("/_external-auth-%v", encodedAuthURL) + expected = fmt.Sprintf("/_external-auth-%v", encodedAuthURL) if str != expected { t.Errorf("Expected \n'%v'\nbut returned \n'%v'", expected, str) @@ -283,11 +309,19 @@ func 
TestBuildAuthLocation(t *testing.T) { } func TestBuildAuthResponseHeaders(t *testing.T) { + invalidType := &ingress.Ingress{} + expected := []string{} + actual := buildAuthResponseHeaders(invalidType) + + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + loc := &ingress.Location{ ExternalAuth: authreq.Config{ResponseHeaders: []string{"h1", "H-With-Caps-And-Dashes"}}, } headers := buildAuthResponseHeaders(loc) - expected := []string{ + expected = []string{ "auth_request_set $authHeader0 $upstream_http_h1;", "proxy_set_header 'h1' $authHeader0;", "auth_request_set $authHeader1 $upstream_http_h_with_caps_and_dashes;", @@ -378,6 +412,14 @@ func BenchmarkTemplateWithData(b *testing.B) { } func TestBuildDenyVariable(t *testing.T) { + invalidType := &ingress.Ingress{} + expected := "" + actual := buildDenyVariable(invalidType) + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + a := buildDenyVariable("host1.example.com_/.well-known/acme-challenge") b := buildDenyVariable("host1.example.com_/.well-known/acme-challenge") if !reflect.DeepEqual(a, b) { @@ -416,6 +458,14 @@ func TestBuildByteSize(t *testing.T) { } func TestIsLocationAllowed(t *testing.T) { + invalidType := &ingress.Ingress{} + expected := false + actual := isLocationAllowed(invalidType) + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + loc := ingress.Location{ Denied: nil, } @@ -427,23 +477,47 @@ func TestIsLocationAllowed(t *testing.T) { } func TestBuildForwardedFor(t *testing.T) { - inputStr := "X-Forwarded-For" - outputStr := buildForwardedFor(inputStr) + invalidType := &ingress.Ingress{} + expected := "" + actual := buildForwardedFor(invalidType) + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } - validStr := "$http_x_forwarded_for" + inputStr := "X-Forwarded-For" + expected = "$http_x_forwarded_for" + 
actual = buildForwardedFor(inputStr) - if outputStr != validStr { - t.Errorf("Expected '%v' but returned '%v'", validStr, outputStr) + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) } } func TestBuildResolversForLua(t *testing.T) { + ipOne := net.ParseIP("192.0.0.1") ipTwo := net.ParseIP("2001:db8:1234:0000:0000:0000:0000:0000") ipList := []net.IP{ipOne, ipTwo} - expected := "\"192.0.0.1\", \"2001:db8:1234::\"" - actual := buildResolversForLua(ipList, false) + invalidType := &ingress.Ingress{} + expected := "" + actual := buildResolversForLua(invalidType, false) + + // Invalid Type for []net.IP + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + + actual = buildResolversForLua(ipList, invalidType) + + // Invalid Type for bool + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + + expected = "\"192.0.0.1\", \"2001:db8:1234::\"" + actual = buildResolversForLua(ipList, false) if expected != actual { t.Errorf("Expected '%v' but returned '%v'", expected, actual) @@ -462,6 +536,22 @@ func TestBuildResolvers(t *testing.T) { ipTwo := net.ParseIP("2001:db8:1234:0000:0000:0000:0000:0000") ipList := []net.IP{ipOne, ipTwo} + invalidType := &ingress.Ingress{} + expected := "" + actual := buildResolvers(invalidType, false) + + // Invalid Type for []net.IP + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + + actual = buildResolvers(ipList, invalidType) + + // Invalid Type for bool + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + validResolver := "resolver 192.0.0.1 [2001:db8:1234::] valid=30s;" resolver := buildResolvers(ipList, false) @@ -478,6 +568,14 @@ func TestBuildResolvers(t *testing.T) { } func TestBuildNextUpstream(t *testing.T) { + invalidType := &ingress.Ingress{} + expected := "" + actual := buildNextUpstream(invalidType, "") + + if expected != actual 
{ + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + cases := map[string]struct { NextUpstream string NonIdempotent bool @@ -516,6 +614,14 @@ func TestBuildNextUpstream(t *testing.T) { } func TestBuildRateLimit(t *testing.T) { + invalidType := &ingress.Ingress{} + expected := []string{} + actual := buildRateLimit(invalidType) + + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + loc := &ingress.Location{} loc.RateLimit.Connections.Name = "con" @@ -547,9 +653,45 @@ func TestBuildRateLimit(t *testing.T) { t.Errorf("Expected '%v' but returned '%v'", validLimits, limits) } } + + // Invalid limit + limits = buildRateLimit(&ingress.Ingress{}) + if !reflect.DeepEqual(expected, limits) { + t.Errorf("Expected '%v' but returned '%v'", expected, limits) + } +} + +// TODO: Needs more tests +func TestBuildRateLimitZones(t *testing.T) { + invalidType := &ingress.Ingress{} + expected := []string{} + actual := buildRateLimitZones(invalidType) + + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } +} + +// TODO: Needs more tests +func TestFilterRateLimits(t *testing.T) { + invalidType := &ingress.Ingress{} + expected := []ratelimit.Config{} + actual := filterRateLimits(invalidType) + + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } } func TestBuildAuthSignURL(t *testing.T) { + invalidType := &ingress.Ingress{} + expected := "" + actual := buildAuthSignURL(invalidType) + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + cases := map[string]struct { Input, Output string }{ @@ -566,6 +708,13 @@ func TestBuildAuthSignURL(t *testing.T) { } func TestIsLocationInLocationList(t *testing.T) { + invalidType := &ingress.Ingress{} + expected := false + actual := isLocationInLocationList(invalidType, "") + + if expected != actual { + 
t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } testCases := []struct { location *ingress.Location @@ -589,6 +738,14 @@ func TestIsLocationInLocationList(t *testing.T) { } func TestBuildUpstreamName(t *testing.T) { + invalidType := &ingress.Ingress{} + expected := "" + actual := buildUpstreamName(invalidType) + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + defaultBackend := "upstream-name" defaultHost := "example.com" @@ -744,3 +901,230 @@ func TestGetIngressInformation(t *testing.T) { t.Errorf("Expected %v, but got %v", expected, info) } } + +func TestBuildCustomErrorLocationsPerServer(t *testing.T) { + testCases := []struct { + server interface{} + expectedResults []errorLocation + }{ + { // Single ingress + &ingress.Server{Locations: []*ingress.Location{ + { + DefaultBackendUpstreamName: "custom-default-backend-test-backend", + CustomHTTPErrors: []int{401, 402}, + }, + }}, + []errorLocation{ + { + UpstreamName: "custom-default-backend-test-backend", + Codes: []int{401, 402}, + }, + }, + }, + { // Two ingresses, overlapping error codes, same backend + &ingress.Server{Locations: []*ingress.Location{ + { + DefaultBackendUpstreamName: "custom-default-backend-test-backend", + CustomHTTPErrors: []int{401, 402}, + }, + { + DefaultBackendUpstreamName: "custom-default-backend-test-backend", + CustomHTTPErrors: []int{402, 403}, + }, + }}, + []errorLocation{ + { + UpstreamName: "custom-default-backend-test-backend", + Codes: []int{401, 402, 403}, + }, + }, + }, + { // Two ingresses, overlapping error codes, different backends + &ingress.Server{Locations: []*ingress.Location{ + { + DefaultBackendUpstreamName: "custom-default-backend-test-one", + CustomHTTPErrors: []int{401, 402}, + }, + { + DefaultBackendUpstreamName: "custom-default-backend-test-two", + CustomHTTPErrors: []int{402, 403}, + }, + }}, + []errorLocation{ + { + UpstreamName: "custom-default-backend-test-one", + Codes: []int{401, 402}, + }, + { 
+ UpstreamName: "custom-default-backend-test-two", + Codes: []int{402, 403}, + }, + }, + }, + { // Many ingresses, overlapping error codes, different backends + &ingress.Server{Locations: []*ingress.Location{ + { + DefaultBackendUpstreamName: "custom-default-backend-test-one", + CustomHTTPErrors: []int{401, 402}, + }, + { + DefaultBackendUpstreamName: "custom-default-backend-test-one", + CustomHTTPErrors: []int{501, 502}, + }, + { + DefaultBackendUpstreamName: "custom-default-backend-test-two", + CustomHTTPErrors: []int{409, 410}, + }, + { + DefaultBackendUpstreamName: "custom-default-backend-test-two", + CustomHTTPErrors: []int{504, 505}, + }, + }}, + []errorLocation{ + { + UpstreamName: "custom-default-backend-test-one", + Codes: []int{401, 402, 501, 502}, + }, + { + UpstreamName: "custom-default-backend-test-two", + Codes: []int{409, 410, 504, 505}, + }, + }, + }, + } + + for _, c := range testCases { + response := buildCustomErrorLocationsPerServer(c.server) + if results, ok := response.([]errorLocation); ok { + if !reflect.DeepEqual(c.expectedResults, results) { + t.Errorf("Expected %+v but got %+v", c.expectedResults, results) + } + } else { + t.Error("Unable to convert to []errorLocation") + } + } +} + +func TestProxySetHeader(t *testing.T) { + invalidType := &ingress.Ingress{} + expected := "proxy_set_header" + actual := proxySetHeader(invalidType) + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + + grpcBackend := &ingress.Location{ + BackendProtocol: "GRPC", + } + + expected = "grpc_set_header" + actual = proxySetHeader(grpcBackend) + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } +} + +func TestBuildInfluxDB(t *testing.T) { + invalidType := &ingress.Ingress{} + expected := "" + actual := buildInfluxDB(invalidType) + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + + cfg := influxdb.Config{ + InfluxDBEnabled: true, + 
InfluxDBServerName: "ok.com", + InfluxDBHost: "host.com", + InfluxDBPort: "5252", + InfluxDBMeasurement: "ok", + } + expected = "influxdb server_name=ok.com host=host.com port=5252 measurement=ok enabled=true;" + actual = buildInfluxDB(cfg) + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } +} + +func TestBuildOpenTracing(t *testing.T) { + invalidType := &ingress.Ingress{} + expected := "" + actual := buildOpentracing(invalidType) + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + + cfgJaeger := config.Configuration{ + EnableOpentracing: true, + JaegerCollectorHost: "jaeger-host.com", + } + expected = "opentracing_load_tracer /usr/local/lib/libjaegertracing_plugin.so /etc/nginx/opentracing.json;\r\n" + actual = buildOpentracing(cfgJaeger) + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + + cfgZipkin := config.Configuration{ + EnableOpentracing: true, + ZipkinCollectorHost: "zipkin-host.com", + } + expected = "opentracing_load_tracer /usr/local/lib/libzipkin_opentracing.so /etc/nginx/opentracing.json;\r\n" + actual = buildOpentracing(cfgZipkin) + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + + cfgDatadog := config.Configuration{ + EnableOpentracing: true, + DatadogCollectorHost: "datadog-host.com", + } + expected = "opentracing_load_tracer /usr/local/lib/libdd_opentracing.so /etc/nginx/opentracing.json;\r\n" + actual = buildOpentracing(cfgDatadog) + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + +} + +func TestEnforceRegexModifier(t *testing.T) { + invalidType := &ingress.Ingress{} + expected := false + actual := enforceRegexModifier(invalidType) + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } + + locs := []*ingress.Location{ + { + Rewrite: rewrite.Config{ + Target: "/alright", + 
UseRegex: true, + }, + Path: "/ok", + }, + } + expected = true + actual = enforceRegexModifier(locs) + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } +} + +func TestStripLocationModifer(t *testing.T) { + expected := "ok.com" + actual := stripLocationModifer("~*ok.com") + + if expected != actual { + t.Errorf("Expected '%v' but returned '%v'", expected, actual) + } +} diff --git a/internal/ingress/controller/util.go b/internal/ingress/controller/util.go index 4046d02f7d..de9405cb4e 100644 --- a/internal/ingress/controller/util.go +++ b/internal/ingress/controller/util.go @@ -17,11 +17,12 @@ limitations under the License. package controller import ( - "k8s.io/apimachinery/pkg/util/intstr" "os" "os/exec" "syscall" + "k8s.io/apimachinery/pkg/util/intstr" + "fmt" "k8s.io/klog" @@ -64,9 +65,8 @@ func sysctlSomaxconn() int { return maxConns } -// sysctlFSFileMax returns the maximum number of open file descriptors (value -// of fs.file-max) or 0 in case of error. 
-func sysctlFSFileMax() int { +// rlimitMaxNumFiles returns hard limit for RLIMIT_NOFILE +func rlimitMaxNumFiles() int { var rLimit syscall.Rlimit err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rLimit) if err != nil { @@ -109,10 +109,5 @@ func nginxExecCommand(args ...string) *exec.Cmd { } func nginxTestCommand(cfg string) *exec.Cmd { - ngx := os.Getenv("NGINX_BINARY") - if ngx == "" { - ngx = defBinary - } - - return exec.Command("authbind", "--deep", ngx, "-c", cfg, "-t") + return exec.Command(defBinary, "-c", cfg, "-t") } diff --git a/internal/ingress/controller/util_test.go b/internal/ingress/controller/util_test.go index de85e783ac..d1e5daf280 100644 --- a/internal/ingress/controller/util_test.go +++ b/internal/ingress/controller/util_test.go @@ -20,8 +20,8 @@ import ( "testing" ) -func TestSysctlFSFileMax(t *testing.T) { - i := sysctlFSFileMax() +func TestRlimitMaxNumFiles(t *testing.T) { + i := rlimitMaxNumFiles() if i < 1 { t.Errorf("returned %v but expected > 0", i) } diff --git a/internal/ingress/defaults/main.go b/internal/ingress/defaults/main.go index 28bf659799..0242c9f7f2 100644 --- a/internal/ingress/defaults/main.go +++ b/internal/ingress/defaults/main.go @@ -50,6 +50,10 @@ type Backend struct { // http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_send_timeout ProxySendTimeout int `json:"proxy-send-timeout"` + // Sets the number of the buffers used for reading a response from the proxied server + // http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffers + ProxyBuffersNumber int `json:"proxy-buffers-number"` + // Sets the size of the buffer used for reading the first part of the response received from the // proxied server. This part usually contains a small response header. 
// http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffer_size) diff --git a/internal/ingress/metric/collectors/nginx_status.go b/internal/ingress/metric/collectors/nginx_status.go index 0df9d8d089..15f45a56f9 100644 --- a/internal/ingress/metric/collectors/nginx_status.go +++ b/internal/ingress/metric/collectors/nginx_status.go @@ -17,13 +17,12 @@ limitations under the License. package collectors import ( - "fmt" - "io/ioutil" - "net/http" + "log" "regexp" "strconv" "github.com/prometheus/client_golang/prometheus" + "k8s.io/ingress-nginx/internal/nginx" "k8s.io/klog" ) @@ -39,9 +38,6 @@ type ( nginxStatusCollector struct { scrapeChan chan scrapeRequest - ngxHealthPort int - ngxStatusPath string - data *nginxStatusData } @@ -78,12 +74,10 @@ type NGINXStatusCollector interface { } // NewNGINXStatus returns a new prometheus collector the default nginx status module -func NewNGINXStatus(podName, namespace, ingressClass string, ngxHealthPort int) (NGINXStatusCollector, error) { +func NewNGINXStatus(podName, namespace, ingressClass string) (NGINXStatusCollector, error) { p := nginxStatusCollector{ - scrapeChan: make(chan scrapeRequest), - ngxHealthPort: ngxHealthPort, - ngxStatusPath: "/nginx_status", + scrapeChan: make(chan scrapeRequest), } constLabels := prometheus.Labels{ @@ -138,24 +132,6 @@ func (p nginxStatusCollector) Stop() { close(p.scrapeChan) } -func httpBody(url string) ([]byte, error) { - resp, err := http.DefaultClient.Get(url) - if err != nil { - return nil, fmt.Errorf("unexpected error scraping nginx : %v", err) - } - - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("unexpected error scraping nginx (%v)", err) - } - defer resp.Body.Close() - if resp.StatusCode < 200 || resp.StatusCode >= 400 { - return nil, fmt.Errorf("unexpected error scraping nginx (status %v)", resp.StatusCode) - } - - return data, nil -} - func toInt(data []string, pos int) int { if len(data) == 0 { return 0 @@ -187,27 
+163,23 @@ func parse(data string) *basicStatus { } } -func getNginxStatus(port int, path string) (*basicStatus, error) { - url := fmt.Sprintf("http://0.0.0.0:%v%v", port, path) - klog.V(3).Infof("start scraping url: %v", url) - - data, err := httpBody(url) - - if err != nil { - return nil, fmt.Errorf("unexpected error scraping nginx status page: %v", err) - } - - return parse(string(data)), nil -} - // nginxStatusCollector scrape the nginx status func (p nginxStatusCollector) scrape(ch chan<- prometheus.Metric) { - s, err := getNginxStatus(p.ngxHealthPort, p.ngxStatusPath) + klog.V(3).Infof("start scraping socket: %v", nginx.StatusPath) + status, data, err := nginx.NewGetStatusRequest(nginx.StatusPath) if err != nil { + log.Printf("%v", err) klog.Warningf("unexpected error obtaining nginx status info: %v", err) return } + if status < 200 || status >= 400 { + klog.Warningf("unexpected error obtaining nginx status info (status %v)", status) + return + } + + s := parse(string(data)) + ch <- prometheus.MustNewConstMetric(p.data.connectionsTotal, prometheus.CounterValue, float64(s.Active), "active") ch <- prometheus.MustNewConstMetric(p.data.connectionsTotal, diff --git a/internal/ingress/metric/collectors/nginx_status_test.go b/internal/ingress/metric/collectors/nginx_status_test.go index 5d6dee0c73..e42620d5a2 100644 --- a/internal/ingress/metric/collectors/nginx_status_test.go +++ b/internal/ingress/metric/collectors/nginx_status_test.go @@ -21,9 +21,12 @@ import ( "net" "net/http" "net/http/httptest" + "os" "testing" + "time" "github.com/prometheus/client_golang/prometheus" + "k8s.io/ingress-nginx/internal/nginx" ) func TestStatusCollector(t *testing.T) { @@ -96,24 +99,39 @@ func TestStatusCollector(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - fmt.Fprintf(w, c.mock) - })) - p := 
server.Listener.Addr().(*net.TCPAddr).Port + listener, err := net.Listen("unix", nginx.StatusSocket) + if err != nil { + t.Fatalf("crating unix listener: %s", err) + } + + server := &httptest.Server{ + Listener: listener, + Config: &http.Server{Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) - cm, err := NewNGINXStatus("pod", "default", "nginx", p) + if r.URL.Path == "/nginx_status" { + _, err := fmt.Fprintf(w, c.mock) + if err != nil { + t.Fatal(err) + } + + return + } + + fmt.Fprintf(w, "OK") + })}, + } + server.Start() + + time.Sleep(1 * time.Second) + + cm, err := NewNGINXStatus("pod", "default", "nginx") if err != nil { t.Errorf("unexpected error creating nginx status collector: %v", err) } go cm.Start() - defer func() { - server.Close() - cm.Stop() - }() - reg := prometheus.NewPedanticRegistry() if err := reg.Register(cm); err != nil { t.Errorf("registering collector failed: %s", err) @@ -124,6 +142,12 @@ func TestStatusCollector(t *testing.T) { } reg.Unregister(cm) + + server.Close() + cm.Stop() + + listener.Close() + os.Remove(nginx.StatusSocket) }) } } diff --git a/internal/ingress/metric/main.go b/internal/ingress/metric/main.go index c25343aeaa..4950a24976 100644 --- a/internal/ingress/metric/main.go +++ b/internal/ingress/metric/main.go @@ -59,7 +59,7 @@ type collector struct { } // NewCollector creates a new metric collector the for ingress controller -func NewCollector(statusPort int, metricsPerHost bool, registry *prometheus.Registry) (Collector, error) { +func NewCollector(metricsPerHost bool, registry *prometheus.Registry) (Collector, error) { podNamespace := os.Getenv("POD_NAMESPACE") if podNamespace == "" { podNamespace = "default" @@ -67,7 +67,7 @@ func NewCollector(statusPort int, metricsPerHost bool, registry *prometheus.Regi podName := os.Getenv("POD_NAME") - nc, err := collectors.NewNGINXStatus(podName, podNamespace, class.IngressClass, statusPort) + nc, err := 
collectors.NewNGINXStatus(podName, podNamespace, class.IngressClass) if err != nil { return nil, err } diff --git a/internal/ingress/types.go b/internal/ingress/types.go index 01aa1952ae..1a6380986b 100644 --- a/internal/ingress/types.go +++ b/internal/ingress/types.go @@ -118,6 +118,8 @@ type TrafficShapingPolicy struct { Weight int `json:"weight"` // Header on which to redirect requests to this backend Header string `json:"header"` + // HeaderValue on which to redirect requests to this backend + HeaderValue string `json:"headerValue"` // Cookie on which to redirect requests to this backend Cookie string `json:"cookie"` } @@ -237,7 +239,7 @@ type Location struct { BasicDigestAuth auth.Config `json:"basicDigestAuth,omitempty"` // Denied returns an error when this location cannot not be allowed // Requesting a denied location should return HTTP code 403. - Denied error `json:"denied,omitempty"` + Denied *string `json:"denied,omitempty"` // CorsConfig returns the Cors Configuration for the ingress rule // +optional CorsConfig cors.Config `json:"corsConfig,omitempty"` @@ -285,6 +287,9 @@ type Location struct { // DefaultBackend allows the use of a custom default backend for this location. // +optional DefaultBackend *apiv1.Service `json:"defaultBackend,omitempty"` + // DefaultBackendUpstreamName is the upstream-formatted string for the name of + // this location's custom default backend + DefaultBackendUpstreamName string `json:"defaultBackendUpstreamName,omitempty"` // XForwardedPrefix allows to add a header X-Forwarded-Prefix to the request with the // original location. 
// +optional @@ -306,6 +311,8 @@ type Location struct { // ModSecurity allows to enable and configure modsecurity // +optional ModSecurity modsecurity.Config `json:"modsecurity"` + // Satisfy dictates allow access if any or all is set + Satisfy string `json:"satisfy"` } // SSLPassthroughBackend describes a SSL upstream server configured diff --git a/internal/ingress/types_equals.go b/internal/ingress/types_equals.go index e0a6211584..702d7dc969 100644 --- a/internal/ingress/types_equals.go +++ b/internal/ingress/types_equals.go @@ -230,6 +230,12 @@ func (csa1 *CookieSessionAffinity) Equal(csa2 *CookieSessionAffinity) bool { if csa1.Path != csa2.Path { return false } + if csa1.Expires != csa2.Expires { + return false + } + if csa1.MaxAge != csa2.MaxAge { + return false + } return true } @@ -293,6 +299,9 @@ func (tsp1 TrafficShapingPolicy) Equal(tsp2 TrafficShapingPolicy) bool { if tsp1.Header != tsp2.Header { return false } + if tsp1.HeaderValue != tsp2.HeaderValue { + return false + } if tsp1.Cookie != tsp2.Cookie { return false } @@ -446,10 +455,34 @@ func (l1 *Location) Equal(l2 *Location) bool { return false } + if len(l1.CustomHTTPErrors) != len(l2.CustomHTTPErrors) { + return false + } + for _, code1 := range l1.CustomHTTPErrors { + found := false + for _, code2 := range l2.CustomHTTPErrors { + if code1 == code2 { + found = true + break + } + } + if !found { + return false + } + } + if !(&l1.ModSecurity).Equal(&l2.ModSecurity) { return false } + if l1.Satisfy != l2.Satisfy { + return false + } + + if l1.DefaultBackendUpstreamName != l2.DefaultBackendUpstreamName { + return false + } + return true } diff --git a/internal/nginx/main.go b/internal/nginx/main.go new file mode 100644 index 0000000000..703f78dfb1 --- /dev/null +++ b/internal/nginx/main.go @@ -0,0 +1,142 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nginx + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "os" + "strings" + "time" + + "github.com/tv42/httpunix" +) + +// PID defines the location of the pid file used by NGINX +var PID = "/tmp/nginx.pid" + +// StatusSocket defines the location of the unix socket used by NGINX for the status server +var StatusSocket = "/tmp/nginx-status-server.sock" + +// HealthPath defines the path used to define the health check location in NGINX +var HealthPath = "/healthz" + +// StatusPath defines the path used to expose the NGINX status page +// http://nginx.org/en/docs/http/ngx_http_stub_status_module.html +var StatusPath = "/nginx_status" + +// StreamSocket defines the location of the unix socket used by NGINX for the NGINX stream configuration socket +var StreamSocket = "/tmp/ingress-stream.sock" + +var statusLocation = "nginx-status" + +var socketClient = buildUnixSocketClient() + +// NewGetStatusRequest creates a new GET request to the internal NGINX status server +func NewGetStatusRequest(path string) (int, []byte, error) { + url := fmt.Sprintf("http+unix://%v%v", statusLocation, path) + res, err := socketClient.Get(url) + if err != nil { + return 0, nil, err + } + defer res.Body.Close() + + data, err := ioutil.ReadAll(res.Body) + if err != nil { + return 0, nil, err + } + + return res.StatusCode, data, nil +} + +// NewPostStatusRequest creates a new POST request to the internal NGINX status server +func NewPostStatusRequest(path, contentType string, data interface{}) (int, []byte, error) { + url := 
fmt.Sprintf("http+unix://%v%v", statusLocation, path) + + buf, err := json.Marshal(data) + if err != nil { + return 0, nil, err + } + + res, err := socketClient.Post(url, contentType, bytes.NewReader(buf)) + if err != nil { + return 0, nil, err + } + defer res.Body.Close() + + body, err := ioutil.ReadAll(res.Body) + if err != nil { + return 0, nil, err + } + + return res.StatusCode, body, nil +} + +// GetServerBlock takes an nginx.conf file and a host and tries to find the server block for that host +func GetServerBlock(conf string, host string) (string, error) { + startMsg := fmt.Sprintf("## start server %v", host) + endMsg := fmt.Sprintf("## end server %v", host) + + blockStart := strings.Index(conf, startMsg) + if blockStart < 0 { + return "", fmt.Errorf("Host %v was not found in the controller's nginx.conf", host) + } + blockStart = blockStart + len(startMsg) + + blockEnd := strings.Index(conf, endMsg) + if blockEnd < 0 { + return "", fmt.Errorf("The end of the host server block could not be found, but the beginning was") + } + + return conf[blockStart:blockEnd], nil +} + +// ReadNginxConf reads the nginx configuration file into a string +func ReadNginxConf() (string, error) { + return ReadFileToString("/etc/nginx/nginx.conf") +} + +// ReadFileToString reads any file into a string +func ReadFileToString(path string) (string, error) { + f, err := os.Open(path) + if err != nil { + return "", err + } + defer f.Close() + + contents, err := ioutil.ReadAll(f) + if err != nil { + return "", err + } + return string(contents), nil +} + +func buildUnixSocketClient() *http.Client { + u := &httpunix.Transport{ + DialTimeout: 1 * time.Second, + RequestTimeout: 10 * time.Second, + ResponseHeaderTimeout: 10 * time.Second, + } + u.RegisterLocation(statusLocation, StatusSocket) + + return &http.Client{ + Transport: u, + } +} diff --git a/rootfs/Dockerfile b/rootfs/Dockerfile index 23438b42b1..d6d522fb24 100644 --- a/rootfs/Dockerfile +++ b/rootfs/Dockerfile @@ -42,6 +42,9 @@ 
RUN bash -eu -c ' \ RUN setcap cap_net_bind_service=+ep /nginx-ingress-controller \ && setcap -v cap_net_bind_service=+ep /nginx-ingress-controller +RUN setcap cap_net_bind_service=+ep /usr/sbin/nginx \ + && setcap -v cap_net_bind_service=+ep /usr/sbin/nginx + # Create symlinks to redirect nginx logs to stdout and stderr docker log collector # This only works if nginx is started with CMD or ENTRYPOINT RUN ln -sf /dev/stdout /var/log/nginx/access.log diff --git a/rootfs/etc/nginx/lua/balancer.lua b/rootfs/etc/nginx/lua/balancer.lua index f0eb6de4ac..7eb49893b8 100644 --- a/rootfs/etc/nginx/lua/balancer.lua +++ b/rootfs/etc/nginx/lua/balancer.lua @@ -1,5 +1,5 @@ local ngx_balancer = require("ngx.balancer") -local json = require("cjson") +local cjson = require("cjson.safe") local util = require("util") local dns_util = require("util.dns") local configuration = require("configuration") @@ -75,7 +75,8 @@ end local function sync_backend(backend) if not backend.endpoints or #backend.endpoints == 0 then - ngx.log(ngx.INFO, string.format("there is no endpoint for backend %s. Skipping...", backend.name)) + ngx.log(ngx.INFO, string.format("there is no endpoint for backend %s. Removing...", backend.name)) + balancers[backend.name] = nil return end @@ -114,9 +115,9 @@ local function sync_backends() return end - local ok, new_backends = pcall(json.decode, backends_data) - if not ok then - ngx.log(ngx.ERR, "could not parse backends data: " .. tostring(new_backends)) + local new_backends, err = cjson.decode(backends_data) + if not new_backends then + ngx.log(ngx.ERR, "could not parse backends data: ", err) return end @@ -160,7 +161,11 @@ local function route_to_alternative_balancer(balancer) local target_header = util.replace_special_char(traffic_shaping_policy.header, "-", "_") local header = ngx.var["http_" .. 
target_header] if header then - if header == "always" then + if traffic_shaping_policy.headerValue and #traffic_shaping_policy.headerValue > 0 then + if traffic_shaping_policy.headerValue == header then + return true + end + elseif header == "always" then return true elseif header == "never" then return false diff --git a/rootfs/etc/nginx/lua/balancer/sticky.lua b/rootfs/etc/nginx/lua/balancer/sticky.lua index f60d1810e0..6b062b4d2a 100644 --- a/rootfs/etc/nginx/lua/balancer/sticky.lua +++ b/rootfs/etc/nginx/lua/balancer/sticky.lua @@ -4,24 +4,30 @@ local util = require("util") local ck = require("resty.cookie") local _M = balancer_resty:new({ factory = resty_chash, name = "sticky" }) +local DEFAULT_COOKIE_NAME = "route" -function _M.new(self, backend) - local nodes = util.get_nodes(backend.endpoints) +local function get_digest_func(hash) local digest_func = util.md5_digest - if backend["sessionAffinityConfig"]["cookieSessionAffinity"]["hash"] == "sha1" then + if hash == "sha1" then digest_func = util.sha1_digest end + return digest_func +end + +function _M.cookie_name(self) + return self.cookie_session_affinity.name or DEFAULT_COOKIE_NAME +end + +function _M.new(self, backend) + local nodes = util.get_nodes(backend.endpoints) + local digest_func = get_digest_func(backend["sessionAffinityConfig"]["cookieSessionAffinity"]["hash"]) local o = { instance = self.factory:new(nodes), - cookie_name = backend["sessionAffinityConfig"]["cookieSessionAffinity"]["name"] or "route", - cookie_expires = backend["sessionAffinityConfig"]["cookieSessionAffinity"]["expires"], - cookie_max_age = backend["sessionAffinityConfig"]["cookieSessionAffinity"]["maxage"], - cookie_path = backend["sessionAffinityConfig"]["cookieSessionAffinity"]["path"], - cookie_locations = backend["sessionAffinityConfig"]["cookieSessionAffinity"]["locations"], digest_func = digest_func, traffic_shaping_policy = backend.trafficShapingPolicy, alternative_backends = backend.alternativeBackends, + 
cookie_session_affinity = backend["sessionAffinityConfig"]["cookieSessionAffinity"] } setmetatable(o, self) self.__index = self @@ -43,25 +49,25 @@ local function set_cookie(self, value) ngx.log(ngx.ERR, err) end - local cookie_path = self.cookie_path + local cookie_path = self.cookie_session_affinity.path if not cookie_path then cookie_path = ngx.var.location_path end local cookie_data = { - key = self.cookie_name, + key = self:cookie_name(), value = value, path = cookie_path, httponly = true, secure = ngx.var.https == "on", } - if self.cookie_expires and self.cookie_expires ~= "" then - cookie_data.expires = ngx.cookie_time(ngx.time() + tonumber(self.cookie_expires)) + if self.cookie_session_affinity.expires and self.cookie_session_affinity.expires ~= "" then + cookie_data.expires = ngx.cookie_time(ngx.time() + tonumber(self.cookie_session_affinity.expires)) end - if self.cookie_max_age and self.cookie_max_age ~= "" then - cookie_data.max_age = tonumber(self.cookie_max_age) + if self.cookie_session_affinity.maxage and self.cookie_session_affinity.maxage ~= "" then + cookie_data.max_age = tonumber(self.cookie_session_affinity.maxage) end local ok @@ -78,13 +84,13 @@ function _M.balance(self) return end - local key = cookie:get(self.cookie_name) + local key = cookie:get(self:cookie_name()) if not key then local random_str = string.format("%s.%s", ngx.now(), ngx.worker.pid()) key = encrypted_endpoint_string(self, random_str) - if self.cookie_locations then - local locs = self.cookie_locations[ngx.var.host] + if self.cookie_session_affinity.locations then + local locs = self.cookie_session_affinity.locations[ngx.var.host] if locs ~= nil then for _, path in pairs(locs) do if ngx.var.location_path == path then @@ -99,4 +105,20 @@ function _M.balance(self) return self.instance:find(key) end +function _M.sync(self, backend) + balancer_resty.sync(self, backend) + + -- Reload the balancer if any of the annotations have changed. 
+ local changed = not util.deep_compare( + self.cookie_session_affinity, + backend.sessionAffinityConfig.cookieSessionAffinity + ) + if not changed then + return + end + + self.cookie_session_affinity = backend.sessionAffinityConfig.cookieSessionAffinity + self.digest_func = get_digest_func(backend.sessionAffinityConfig.cookieSessionAffinity.hash) +end + return _M diff --git a/rootfs/etc/nginx/lua/configuration.lua b/rootfs/etc/nginx/lua/configuration.lua index ade5e4d1b0..10f86eddc9 100644 --- a/rootfs/etc/nginx/lua/configuration.lua +++ b/rootfs/etc/nginx/lua/configuration.lua @@ -1,4 +1,4 @@ -local json = require("cjson") +local cjson = require("cjson.safe") -- this is the Lua representation of Configuration struct in internal/ingress/types.go local configuration_data = ngx.shared.configuration_data @@ -49,9 +49,9 @@ local function handle_servers() local raw_servers = fetch_request_body() - local ok, servers = pcall(json.decode, raw_servers) - if not ok then - ngx.log(ngx.ERR, "could not parse servers: " .. 
tostring(servers)) + local servers, err = cjson.decode(raw_servers) + if not servers then + ngx.log(ngx.ERR, "could not parse servers: ", err) ngx.status = ngx.HTTP_BAD_REQUEST return end @@ -59,7 +59,8 @@ local function handle_servers() local err_buf = {} for _, server in ipairs(servers) do if server.hostname and server.sslCert.pemCertKey then - local success, err = certificate_data:safe_set(server.hostname, server.sslCert.pemCertKey) + local success + success, err = certificate_data:safe_set(server.hostname, server.sslCert.pemCertKey) if not success then if err == "no memory" then ngx.status = ngx.HTTP_INTERNAL_SERVER_ERROR @@ -109,6 +110,32 @@ local function handle_general() ngx.status = ngx.HTTP_CREATED end +local function handle_certs() + if ngx.var.request_method ~= "GET" then + ngx.status = ngx.HTTP_BAD_REQUEST + ngx.print("Only GET requests are allowed!") + return + end + + local query = ngx.req.get_uri_args() + if not query["hostname"] then + ngx.status = ngx.HTTP_BAD_REQUEST + ngx.print("Hostname must be specified.") + return + end + + local key = _M.get_pem_cert_key(query["hostname"]) + if key then + ngx.status = ngx.HTTP_OK + ngx.print(key) + return + else + ngx.status = ngx.HTTP_NOT_FOUND + ngx.print("No key associated with this hostname.") + return + end +end + function _M.call() if ngx.var.request_method ~= "POST" and ngx.var.request_method ~= "GET" then ngx.status = ngx.HTTP_BAD_REQUEST @@ -126,6 +153,11 @@ function _M.call() return end + if ngx.var.uri == "/configuration/certs" then + handle_certs() + return + end + if ngx.var.request_uri ~= "/configuration/backends" then ngx.status = ngx.HTTP_NOT_FOUND ngx.print("Not found!") diff --git a/rootfs/etc/nginx/lua/lua_ingress.lua b/rootfs/etc/nginx/lua/lua_ingress.lua index f0d33db818..35077f9d83 100644 --- a/rootfs/etc/nginx/lua/lua_ingress.lua +++ b/rootfs/etc/nginx/lua/lua_ingress.lua @@ -2,9 +2,32 @@ local _M = {} local seeds = {} local original_randomseed = math.randomseed + +local function 
get_seed_from_urandom() + local seed + local frandom, err = io.open("/dev/urandom", "rb") + if not frandom then + ngx.log(ngx.WARN, 'failed to open /dev/urandom: ', err) + return nil + end + + local str = frandom:read(4) + frandom:close() + if not str then + ngx.log(ngx.WARN, 'failed to read data from /dev/urandom') + return nil + end + + seed = 0 + for i = 1, 4 do + seed = 256 * seed + str:byte(i) + end + + return seed +end + math.randomseed = function(seed) local pid = ngx.worker.pid() - if seeds[pid] then ngx.log(ngx.WARN, string.format("ignoring math.randomseed(%d) since PRNG is already seeded for worker %d", seed, pid)) @@ -16,7 +39,12 @@ math.randomseed = function(seed) end local function randomseed() - math.randomseed(ngx.time() + ngx.worker.pid()) + local seed = get_seed_from_urandom() + if not seed then + ngx.log(ngx.WARN, 'failed to get seed from urandom') + seed = ngx.now() * 1000 + ngx.worker.pid() + end + math.randomseed(seed) end function _M.init_worker() diff --git a/rootfs/etc/nginx/lua/monitor.lua b/rootfs/etc/nginx/lua/monitor.lua index a63ddc75c8..cefae7335f 100644 --- a/rootfs/etc/nginx/lua/monitor.lua +++ b/rootfs/etc/nginx/lua/monitor.lua @@ -1,12 +1,16 @@ local socket = ngx.socket.tcp -local cjson = require('cjson') +local cjson = require("cjson.safe") local assert = assert +local new_tab = require "table.new" +local clear_tab = require "table.clear" +local clone_tab = require "table.clone" -local metrics_batch = {} -- if an Nginx worker processes more than (MAX_BATCH_SIZE/FLUSH_INTERVAL) RPS then it will start dropping metrics local MAX_BATCH_SIZE = 10000 local FLUSH_INTERVAL = 1 -- second +local metrics_batch = new_tab(MAX_BATCH_SIZE, 0) + local _M = {} local function send(payload) @@ -46,12 +50,12 @@ local function flush(premature) return end - local current_metrics_batch = metrics_batch - metrics_batch = {} + local current_metrics_batch = clone_tab(metrics_batch) + clear_tab(metrics_batch) - local ok, payload = pcall(cjson.encode, 
current_metrics_batch) - if not ok then - ngx.log(ngx.ERR, "error while encoding metrics: " .. tostring(payload)) + local payload, err = cjson.encode(current_metrics_batch) + if not payload then + ngx.log(ngx.ERR, "error while encoding metrics: ", err) return end @@ -66,12 +70,13 @@ function _M.init_worker() end function _M.call() - if #metrics_batch >= MAX_BATCH_SIZE then + local metrics_size = #metrics_batch + if metrics_size >= MAX_BATCH_SIZE then ngx.log(ngx.WARN, "omitting metrics for the request, current batch is full") return end - table.insert(metrics_batch, metrics()) + metrics_batch[metrics_size + 1] = metrics() end if _TEST then diff --git a/rootfs/etc/nginx/lua/tcp_udp_balancer.lua b/rootfs/etc/nginx/lua/tcp_udp_balancer.lua index 75d29c5bd7..7d9601ca8b 100644 --- a/rootfs/etc/nginx/lua/tcp_udp_balancer.lua +++ b/rootfs/etc/nginx/lua/tcp_udp_balancer.lua @@ -1,5 +1,5 @@ local ngx_balancer = require("ngx.balancer") -local json = require("cjson") +local cjson = require("cjson.safe") local util = require("util") local dns_util = require("util.dns") local configuration = require("tcp_udp_configuration") @@ -99,9 +99,9 @@ local function sync_backends() return end - local ok, new_backends = pcall(json.decode, backends_data) - if not ok then - ngx.log(ngx.ERR, "could not parse backends data: " .. 
tostring(new_backends)) + local new_backends, err = cjson.decode(backends_data) + if not new_backends then + ngx.log(ngx.ERR, "could not parse backends data: ", err) return end diff --git a/rootfs/etc/nginx/lua/tcp_udp_configuration.lua b/rootfs/etc/nginx/lua/tcp_udp_configuration.lua index 8cc4111cf3..902ac59b61 100644 --- a/rootfs/etc/nginx/lua/tcp_udp_configuration.lua +++ b/rootfs/etc/nginx/lua/tcp_udp_configuration.lua @@ -1,9 +1,7 @@ -- this is the Lua representation of TCP/UDP Configuration local tcp_udp_configuration_data = ngx.shared.tcp_udp_configuration_data -local _M = { - nameservers = {} -} +local _M = {} function _M.get_backends_data() return tcp_udp_configuration_data:get("backends") diff --git a/rootfs/etc/nginx/lua/test/balancer/sticky_test.lua b/rootfs/etc/nginx/lua/test/balancer/sticky_test.lua index 8b9741d3f6..06a6235573 100644 --- a/rootfs/etc/nginx/lua/test/balancer/sticky_test.lua +++ b/rootfs/etc/nginx/lua/test/balancer/sticky_test.lua @@ -55,7 +55,7 @@ describe("Sticky", function() it("returns an instance containing the corresponding cookie name", function() local sticky_balancer_instance = sticky:new(test_backend) local test_backend_cookie_name = test_backend.sessionAffinityConfig.cookieSessionAffinity.name - assert.equal(sticky_balancer_instance.cookie_name, test_backend_cookie_name) + assert.equal(sticky_balancer_instance:cookie_name(), test_backend_cookie_name) end) end) @@ -65,7 +65,7 @@ describe("Sticky", function() temp_backend.sessionAffinityConfig.cookieSessionAffinity.name = nil local sticky_balancer_instance = sticky:new(temp_backend) local default_cookie_name = "route" - assert.equal(sticky_balancer_instance.cookie_name, default_cookie_name) + assert.equal(sticky_balancer_instance:cookie_name(), default_cookie_name) end) end) diff --git a/rootfs/etc/nginx/template/nginx.tmpl b/rootfs/etc/nginx/template/nginx.tmpl index d81f726cd5..815001aa1c 100644 --- a/rootfs/etc/nginx/template/nginx.tmpl +++ 
b/rootfs/etc/nginx/template/nginx.tmpl @@ -10,7 +10,7 @@ # Configuration checksum: {{ $all.Cfg.Checksum }} # setup custom paths that do not require root access -pid /tmp/nginx.pid; +pid {{ .PID }}; {{ if $cfg.UseGeoIP2 }} load_module /etc/nginx/modules/ngx_http_geoip2_module.so; @@ -274,7 +274,7 @@ http { {{ if $cfg.EnableSyslog }} access_log syslog:server={{ $cfg.SyslogHost }}:{{ $cfg.SyslogPort }} upstreaminfo if=$loggable; {{ else }} - access_log {{ $cfg.AccessLogPath }} upstreaminfo if=$loggable; + access_log {{ $cfg.AccessLogPath }} upstreaminfo {{ $cfg.AccessLogParams }} if=$loggable; {{ end }} {{ end }} @@ -433,13 +433,12 @@ http { ssl_ecdh_curve {{ $cfg.SSLECDHCurve }}; - {{ if .CustomErrors }} - # Custom error pages + {{ if gt (len $cfg.CustomHTTPErrors) 0 }} proxy_intercept_errors on; {{ end }} {{ range $errCode := $cfg.CustomHTTPErrors }} - error_page {{ $errCode }} = @custom_{{ $errCode }};{{ end }} + error_page {{ $errCode }} = @custom_upstream-default-backend_{{ $errCode }};{{ end }} proxy_ssl_session_reuse on; @@ -619,7 +618,7 @@ http { {{ $cfg.ServerSnippet }} {{ end }} - {{ template "CUSTOM_ERRORS" (buildCustomErrorDeps $all.ProxySetHeaders $cfg.CustomHTTPErrors $all.EnableMetrics) }} + {{ template "CUSTOM_ERRORS" (buildCustomErrorDeps "upstream-default-backend" $cfg.CustomHTTPErrors $all.EnableMetrics) }} } ## end server {{ $server.Hostname }} @@ -629,7 +628,9 @@ http { server { listen {{ $all.ListenPorts.Default }} default_server {{ if $all.Cfg.ReusePort }}reuseport{{ end }} backlog={{ $all.BacklogSize }}; {{ if $IsIPV6Enabled }}listen [::]:{{ $all.ListenPorts.Default }} default_server {{ if $all.Cfg.ReusePort }}reuseport{{ end }} backlog={{ $all.BacklogSize }};{{ end }} - set $proxy_upstream_name "-"; + set $proxy_upstream_name "internal"; + + access_log off; location / { return 404; @@ -638,35 +639,23 @@ http { # default server, used for NGINX healthcheck and access to nginx stats server { - listen {{ $all.ListenPorts.Status }} default_server 
{{ if $all.Cfg.ReusePort }}reuseport{{ end }} backlog={{ $all.BacklogSize }}; - {{ if $IsIPV6Enabled }}listen [::]:{{ $all.ListenPorts.Status }} default_server {{ if $all.Cfg.ReusePort }}reuseport{{ end }} backlog={{ $all.BacklogSize }};{{ end }} - set $proxy_upstream_name "-"; + listen unix:{{ .StatusSocket }}; + set $proxy_upstream_name "internal"; - {{ if gt (len $cfg.BlockUserAgents) 0 }} - if ($block_ua) { - return 403; - } - {{ end }} - {{ if gt (len $cfg.BlockReferers) 0 }} - if ($block_ref) { - return 403; - } + keepalive_timeout 0; + gzip off; + + access_log off; + + {{ if $cfg.EnableOpentracing }} + opentracing off; {{ end }} location {{ $healthzURI }} { - {{ if $cfg.EnableOpentracing }} - opentracing off; - {{ end }} - access_log off; return 200; } location /is-dynamic-lb-initialized { - {{ if $cfg.EnableOpentracing }} - opentracing off; - {{ end }} - access_log off; - content_by_lua_block { local configuration = require("configuration") local backend_data = configuration.get_backends_data() @@ -680,28 +669,11 @@ http { } } - location /nginx_status { - set $proxy_upstream_name "internal"; - {{ if $cfg.EnableOpentracing }} - opentracing off; - {{ end }} - - access_log off; + location {{ .StatusPath }} { stub_status on; } location /configuration { - access_log off; - {{ if $cfg.EnableOpentracing }} - opentracing off; - {{ end }} - - allow 127.0.0.1; - {{ if $IsIPV6Enabled }} - allow ::1; - {{ end }} - deny all; - # this should be equals to configuration_data dict client_max_body_size 10m; client_body_buffer_size 10m; @@ -713,16 +685,10 @@ http { } location / { - {{ if .CustomErrors }} - proxy_set_header X-Code 404; - {{ end }} - set $proxy_upstream_name "upstream-default-backend"; - proxy_set_header Host $best_http_host; - - proxy_pass http://upstream_balancer; + content_by_lua_block { + ngx.exit(ngx.HTTP_NOT_FOUND) + } } - - {{ template "CUSTOM_ERRORS" (buildCustomErrorDeps $all.ProxySetHeaders $cfg.CustomHTTPErrors $all.EnableMetrics) }} } } @@ -739,12 
+705,19 @@ stream { -- init modules local ok, res + ok, res = pcall(require, "configuration") + if not ok then + error("require failed: " .. tostring(res)) + else + configuration = res + configuration.nameservers = { {{ buildResolversForLua $cfg.Resolver $cfg.DisableIpv6DNS }} } + end + ok, res = pcall(require, "tcp_udp_configuration") if not ok then error("require failed: " .. tostring(res)) else tcp_udp_configuration = res - tcp_udp_configuration.nameservers = { {{ buildResolversForLua $cfg.Resolver $cfg.DisableIpv6DNS }} } end ok, res = pcall(require, "tcp_udp_balancer") @@ -766,7 +739,7 @@ stream { {{ if $cfg.DisableAccessLog }} access_log off; {{ else }} - access_log {{ $cfg.AccessLogPath }} log_stream; + access_log {{ $cfg.AccessLogPath }} log_stream {{ $cfg.AccessLogParams }}; {{ end }} error_log {{ $cfg.ErrorLogPath }}; @@ -780,7 +753,7 @@ stream { } server { - listen unix:/tmp/ingress-stream.sock; + listen unix:{{ .StreamSocket }}; content_by_lua_block { tcp_udp_configuration.call() @@ -842,10 +815,10 @@ stream { {{/* definition of templates to avoid repetitions */}} {{ define "CUSTOM_ERRORS" }} - {{ $proxySetHeaders := .ProxySetHeaders }} {{ $enableMetrics := .EnableMetrics }} + {{ $upstreamName := .UpstreamName }} {{ range $errCode := .ErrorCodes }} - location @custom_{{ $errCode }} { + location @custom_{{ $upstreamName }}_{{ $errCode }} { internal; proxy_intercept_errors off; @@ -859,7 +832,7 @@ stream { proxy_set_header X-Service-Port $service_port; proxy_set_header Host $best_http_host; - set $proxy_upstream_name "upstream-default-backend"; + set $proxy_upstream_name {{ $upstreamName }}; rewrite (.*) / break; @@ -968,7 +941,10 @@ stream { {{ $server.ServerSnippet }} {{ end }} - {{ template "CUSTOM_ERRORS" (buildCustomErrorDeps $all.ProxySetHeaders (collectCustomErrorsPerServer $server) $all.EnableMetrics) }} + {{ range $errorLocation := (buildCustomErrorLocationsPerServer $server) }} + {{ template "CUSTOM_ERRORS" (buildCustomErrorDeps 
$errorLocation.UpstreamName $errorLocation.Codes $all.EnableMetrics) }} + {{ end }} + {{ $enforceRegex := enforceRegexModifier $server.Locations }} {{ range $location := $server.Locations }} @@ -1020,7 +996,7 @@ stream { proxy_buffering {{ $location.Proxy.ProxyBuffering }}; proxy_buffer_size {{ $location.Proxy.BufferSize }}; - proxy_buffers 4 {{ $location.Proxy.BufferSize }}; + proxy_buffers {{ $location.Proxy.BuffersNumber }} {{ $location.Proxy.BufferSize }}; proxy_request_buffering {{ $location.Proxy.RequestBuffering }}; proxy_http_version 1.1; @@ -1171,14 +1147,25 @@ stream { {{ if not (isLocationInLocationList $location $all.Cfg.NoTLSRedirectLocations) }} # enforce ssl on server side if ($redirect_to_https) { + set_by_lua_block $redirect_host { + local ngx_re = require "ngx.re" + + local host_port, err = ngx_re.split(ngx.var.best_http_host, ":") + if err then + ngx.log(ngx.ERR, "could not parse variable: ", err) + return ngx.var.best_http_host; + end + + return host_port[1]; + } + {{ if $location.UsePortInRedirects }} # using custom ports require a different rewrite directive - {{ $redirect_port := (printf ":%v" $all.ListenPorts.HTTPS) }} - error_page 497 ={{ $all.Cfg.HTTPRedirectCode }} https://$host{{ $redirect_port }}$request_uri; - + # https://forum.nginx.org/read.php?2,155978,155978#msg-155978 + error_page 497 ={{ $all.Cfg.HTTPRedirectCode }} https://$redirect_host{{ printf ":%v" $all.ListenPorts.HTTPS }}$request_uri; return 497; {{ else }} - return {{ $all.Cfg.HTTPRedirectCode }} https://$best_http_host$request_uri; + return {{ $all.Cfg.HTTPRedirectCode }} https://$redirect_host$request_uri; {{ end }} } {{ end }} @@ -1319,7 +1306,7 @@ stream { proxy_buffering {{ $location.Proxy.ProxyBuffering }}; proxy_buffer_size {{ $location.Proxy.BufferSize }}; - proxy_buffers 4 {{ $location.Proxy.BufferSize }}; + proxy_buffers {{ $location.Proxy.BuffersNumber }} {{ $location.Proxy.BufferSize }}; proxy_request_buffering {{ $location.Proxy.RequestBuffering }}; 
proxy_http_version 1.1; @@ -1349,6 +1336,10 @@ stream { proxy_set_header X-Service-Port $service_port; {{ end }} + {{ if $location.Satisfy }} + satisfy {{ $location.Satisfy }}; + {{ end }} + {{/* if a location-specific error override is set, add the proxy_intercept here */}} {{ if $location.CustomHTTPErrors }} # Custom error pages per ingress @@ -1356,8 +1347,7 @@ stream { {{ end }} {{ range $errCode := $location.CustomHTTPErrors }} - error_page {{ $errCode }} = @custom_{{ $errCode }};{{ end }} - + error_page {{ $errCode }} = @custom_{{ $location.DefaultBackendUpstreamName }}_{{ $errCode }};{{ end }} {{ buildProxyPass $server.Hostname $all.Backends $location }} {{ if (or (eq $location.Proxy.ProxyRedirectFrom "default") (eq $location.Proxy.ProxyRedirectFrom "off")) }} diff --git a/test/data/config.json b/test/data/config.json index 36ea6d64ff..2c2975c69b 100644 --- a/test/data/config.json +++ b/test/data/config.json @@ -7,6 +7,7 @@ "bind-address-ipv6": [ "[2001:db8:a0b:12f0::1]" ,"[3731:54:65fe:2::a7]" ,"[33:33:33::33::33]" ], "backend": { "custom-http-errors": [404], + "proxy-buffers-number": "4", "proxy-buffer-size": "4k", "proxy-connect-timeout": 5, "proxy-read-timeout": 60, diff --git a/test/e2e-image/.gitignore b/test/e2e-image/.gitignore new file mode 100644 index 0000000000..811b620e0d --- /dev/null +++ b/test/e2e-image/.gitignore @@ -0,0 +1,3 @@ +e2e.test +ginkgo +kubectl diff --git a/test/e2e-image/Dockerfile b/test/e2e-image/Dockerfile new file mode 100644 index 0000000000..593c59c5a4 --- /dev/null +++ b/test/e2e-image/Dockerfile @@ -0,0 +1,17 @@ +FROM quay.io/kubernetes-ingress-controller/debian-base-amd64:0.1 + +RUN clean-install \ + ca-certificates \ + bash \ + tzdata + +COPY ginkgo /usr/local/bin/ +COPY kubectl /usr/local/bin/ +COPY e2e.sh /e2e.sh + +COPY manifests /manifests + +COPY wait-for-nginx.sh / +COPY e2e.test / + +CMD [ "/e2e.sh" ] diff --git a/test/e2e-image/Makefile b/test/e2e-image/Makefile new file mode 100644 index 0000000000..494a21c68f 
--- /dev/null +++ b/test/e2e-image/Makefile @@ -0,0 +1,23 @@ +IMAGE=nginx-ingress-controller:e2e +KUBE_VERSION ?= 1.13.3 + +.PHONY: all container getbins clean + +all: container + +container: + ./kubectl > /dev/null 2>&1 || curl -Lo ./kubectl \ + https://storage.googleapis.com/kubernetes-release/release/v$(KUBE_VERSION)/bin/linux/amd64/kubectl \ + && chmod +x ./kubectl + + $(GOPATH)/bin/ginkgo > /dev/null 2>&1 || go get github.com/onsi/ginkgo/ginkgo + cp $(GOPATH)/bin/ginkgo . + + cp ../e2e/e2e.test . + cp ../e2e/wait-for-nginx.sh . + + docker build -t $(IMAGE) . + +clean: + rm -rf _cache e2e.test kubectl cluster ginkgo + docker rmi -f $(IMAGE) || true diff --git a/test/e2e-image/e2e.sh b/test/e2e-image/e2e.sh new file mode 100755 index 0000000000..1434020c69 --- /dev/null +++ b/test/e2e-image/e2e.sh @@ -0,0 +1,61 @@ +#!/bin/bash + +# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +SLOW_E2E_THRESHOLD=${SLOW_E2E_THRESHOLD:-50} +FOCUS=${FOCUS:-.*} +E2E_NODES=${E2E_NODES:-5} + +if [ ! 
-f ${HOME}/.kube/config ]; then + kubectl config set-cluster dev --certificate-authority=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt --embed-certs=true --server="https://kubernetes.default/" + kubectl config set-credentials user --token="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" + kubectl config set-context default --cluster=dev --user=user + kubectl config use-context default +fi + +echo "Granting permissions to ingress-nginx e2e service account..." +kubectl create serviceaccount ingress-nginx-e2e || true +kubectl create clusterrolebinding permissive-binding \ +--clusterrole=cluster-admin \ +--user=admin \ +--user=kubelet \ +--serviceaccount=default:ingress-nginx-e2e || true + +kubectl apply -f manifests/rbac.yaml + +ginkgo_args=( + "-randomizeSuites" + "-randomizeAllSpecs" + "-flakeAttempts=2" + "-p" + "-trace" + "--noColor=true" + "-slowSpecThreshold=${SLOW_E2E_THRESHOLD}" +) + +echo "Running e2e test suite..." +ginkgo "${ginkgo_args[@]}" \ + -focus=${FOCUS} \ + -skip="\[Serial\]" \ + -nodes=${E2E_NODES} \ + /e2e.test + +echo "Running e2e test suite with tests that require serial execution..." 
+ginkgo "${ginkgo_args[@]}" \ + -focus="\[Serial\]" \ + -nodes=1 \ + /e2e.test diff --git a/test/manifests/ingress-controller/mandatory.yaml b/test/e2e-image/manifests/mandatory.yaml similarity index 85% rename from test/manifests/ingress-controller/mandatory.yaml rename to test/e2e-image/manifests/mandatory.yaml index 37e721ef64..2b2f489c34 100644 --- a/test/manifests/ingress-controller/mandatory.yaml +++ b/test/e2e-image/manifests/mandatory.yaml @@ -32,61 +32,6 @@ metadata: labels: app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - name: nginx-ingress-clusterrole - labels: - app.kubernetes.io/name: ingress-nginx - app.kubernetes.io/part-of: ingress-nginx -rules: - - apiGroups: - - "" - resources: - - configmaps - - endpoints - - nodes - - pods - - secrets - verbs: - - list - - watch - - apiGroups: - - "" - resources: - - nodes - verbs: - - get - - apiGroups: - - "" - resources: - - services - verbs: - - get - - list - - watch - - apiGroups: - - "extensions" - resources: - - ingresses - verbs: - - get - - list - - watch - - apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - apiGroups: - - "extensions" - resources: - - ingresses/status - verbs: - - update --- apiVersion: rbac.authorization.k8s.io/v1beta1 diff --git a/test/e2e-image/manifests/rbac.yaml b/test/e2e-image/manifests/rbac.yaml new file mode 100644 index 0000000000..7f4f79fe8d --- /dev/null +++ b/test/e2e-image/manifests/rbac.yaml @@ -0,0 +1,54 @@ +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: nginx-ingress-clusterrole + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: 
+ - services + verbs: + - get + - list + - watch + - apiGroups: + - "extensions" + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - "extensions" + resources: + - ingresses/status + verbs: + - update diff --git a/test/manifests/ingress-controller/service-nodeport.yaml b/test/e2e-image/manifests/service.yaml similarity index 53% rename from test/manifests/ingress-controller/service-nodeport.yaml rename to test/e2e-image/manifests/service.yaml index 471b9685d1..4226bb0e03 100644 --- a/test/manifests/ingress-controller/service-nodeport.yaml +++ b/test/e2e-image/manifests/service.yaml @@ -3,16 +3,15 @@ kind: Service metadata: name: ingress-nginx spec: - type: NodePort ports: - - name: http - port: 80 - targetPort: 80 - protocol: TCP - - name: https - port: 443 - targetPort: 443 - protocol: TCP + - name: http + port: 80 + targetPort: 80 + protocol: TCP + - name: https + port: 443 + targetPort: 443 + protocol: TCP selector: app.kubernetes.io/name: ingress-nginx app.kubernetes.io/part-of: ingress-nginx diff --git a/test/e2e-image/wait-for-nginx.sh b/test/e2e-image/wait-for-nginx.sh new file mode 100755 index 0000000000..250c32c6b6 --- /dev/null +++ b/test/e2e-image/wait-for-nginx.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +set -e +set -x + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +export NAMESPACE=$1 + +echo "deploying NGINX Ingress controller in namespace $NAMESPACE" + +function on_exit { + local error_code="$?" + + test $error_code == 0 && return; + + echo "Obtaining ingress controller pod logs..." + kubectl logs -l app.kubernetes.io/name=ingress-nginx -n $NAMESPACE +} +trap on_exit EXIT + +kubectl apply -f $DIR/manifests/service.yaml +sed "s@\${NAMESPACE}@${NAMESPACE}@" $DIR/manifests/mandatory.yaml | kubectl apply --namespace=$NAMESPACE -f - +cat $DIR/manifests/service.yaml | kubectl apply --namespace=$NAMESPACE -f - + +# wait for the deployment and fail if there is an error before starting the execution of any test +kubectl rollout status \ + --request-timeout=3m \ + --namespace $NAMESPACE \ + deployment nginx-ingress-controller diff --git a/test/e2e/annotations/affinity.go b/test/e2e/annotations/affinity.go index 37ee031462..ef7a915f71 100644 --- a/test/e2e/annotations/affinity.go +++ b/test/e2e/annotations/affinity.go @@ -51,16 +51,17 @@ var _ = framework.IngressNginxDescribe("Annotations - Affinity/Sticky Sessions", "nginx.ingress.kubernetes.io/session-cookie-name": "SERVERID", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, func(server string) bool { return strings.Contains(server, fmt.Sprintf("server_name %s ;", host)) }) + time.Sleep(waitForLuaSync) resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). 
End() @@ -73,23 +74,64 @@ var _ = framework.IngressNginxDescribe("Annotations - Affinity/Sticky Sessions", Expect(resp.Header.Get("Set-Cookie")).Should(ContainSubstring(match[0])) }) + It("should change cookie name on ingress definition change", func() { + host := "change.foo.com" + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/affinity": "cookie", + "nginx.ingress.kubernetes.io/session-cookie-name": "SERVERID", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, fmt.Sprintf("server_name %s ;", host)) + }) + time.Sleep(waitForLuaSync) + + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(resp.Header.Get("Set-Cookie")).Should(ContainSubstring("SERVERID")) + + ing.ObjectMeta.Annotations["nginx.ingress.kubernetes.io/session-cookie-name"] = "OTHERCOOKIENAME" + f.EnsureIngress(ing) + + time.Sleep(waitForLuaSync) + + resp, _, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). 
+ End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(resp.Header.Get("Set-Cookie")).Should(ContainSubstring("OTHERCOOKIENAME")) + }) + It("should set sticky cookie with sha1 hash", func() { - host := "sticky.foo.com" + host := "sha1.foo.com" annotations := map[string]string{ "nginx.ingress.kubernetes.io/affinity": "cookie", "nginx.ingress.kubernetes.io/session-cookie-hash": "sha1", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, func(server string) bool { return strings.Contains(server, fmt.Sprintf("server_name %s ;", host)) }) + time.Sleep(waitForLuaSync) resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). End() @@ -103,22 +145,23 @@ var _ = framework.IngressNginxDescribe("Annotations - Affinity/Sticky Sessions", }) It("should set the path to /something on the generated cookie", func() { - host := "example.com" + host := "path.foo.com" annotations := map[string]string{ "nginx.ingress.kubernetes.io/affinity": "cookie", "nginx.ingress.kubernetes.io/session-cookie-name": "SERVERID", } - ing := framework.NewSingleIngress(host, "/something", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/something", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, func(server string) bool { return strings.Contains(server, fmt.Sprintf("server_name %s ;", host)) }) + time.Sleep(waitForLuaSync) resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL+"/something"). + Get(f.GetURL(framework.HTTP)+"/something"). Set("Host", host). 
End() @@ -128,7 +171,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Affinity/Sticky Sessions", }) It("does not set the path to / on the generated cookie if there's more than one rule referring to the same backend", func() { - host := "example.com" + host := "morethanonerule.foo.com" annotations := map[string]string{ "nginx.ingress.kubernetes.io/affinity": "cookie", "nginx.ingress.kubernetes.io/session-cookie-name": "SERVERID", @@ -137,7 +180,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Affinity/Sticky Sessions", f.EnsureIngress(&v1beta1.Ingress{ ObjectMeta: metav1.ObjectMeta{ Name: host, - Namespace: f.IngressController.Namespace, + Namespace: f.Namespace, Annotations: annotations, }, Spec: v1beta1.IngressSpec{ @@ -173,9 +216,10 @@ var _ = framework.IngressNginxDescribe("Annotations - Affinity/Sticky Sessions", func(server string) bool { return strings.Contains(server, fmt.Sprintf("server_name %s ;", host)) }) + time.Sleep(waitForLuaSync) resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL+"/something"). + Get(f.GetURL(framework.HTTP)+"/something"). Set("Host", host). End() @@ -184,7 +228,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Affinity/Sticky Sessions", Expect(resp.Header.Get("Set-Cookie")).Should(ContainSubstring("Path=/something;")) resp, _, errs = gorequest.New(). - Get(f.IngressController.HTTPURL+"/somewhereelese"). + Get(f.GetURL(framework.HTTP)+"/somewhereelese"). Set("Host", host). 
End() @@ -194,7 +238,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Affinity/Sticky Sessions", }) It("should set cookie with expires", func() { - host := "cookie.foo.com" + host := "cookieexpires.foo.com" annotations := map[string]string{ "nginx.ingress.kubernetes.io/affinity": "cookie", "nginx.ingress.kubernetes.io/session-cookie-name": "ExpiresCookie", @@ -202,30 +246,35 @@ var _ = framework.IngressNginxDescribe("Annotations - Affinity/Sticky Sessions", "nginx.ingress.kubernetes.io/session-cookie-max-age": "259200", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, func(server string) bool { return strings.Contains(server, fmt.Sprintf("server_name %s ;", host)) }) + time.Sleep(waitForLuaSync) resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). 
End() Expect(errs).Should(BeEmpty()) Expect(resp.StatusCode).Should(Equal(http.StatusOK)) - local, _ := time.LoadLocation("GMT") + local, err := time.LoadLocation("GMT") + Expect(err).ToNot(HaveOccurred()) + Expect(local).ShouldNot(BeNil()) + duration, _ := time.ParseDuration("48h") expected := time.Now().In(local).Add(duration).Format("Mon, 02-Jan-06 15:04") + Expect(resp.Header.Get("Set-Cookie")).Should(ContainSubstring(fmt.Sprintf("Expires=%s", expected))) Expect(resp.Header.Get("Set-Cookie")).Should(ContainSubstring("Max-Age=259200")) }) It("should work with use-regex annotation and session-cookie-path", func() { - host := "cookie.foo.com" + host := "useregex.foo.com" annotations := map[string]string{ "nginx.ingress.kubernetes.io/affinity": "cookie", "nginx.ingress.kubernetes.io/session-cookie-name": "SERVERID", @@ -233,16 +282,17 @@ var _ = framework.IngressNginxDescribe("Annotations - Affinity/Sticky Sessions", "nginx.ingress.kubernetes.io/session-cookie-path": "/foo/bar", } - ing := framework.NewSingleIngress(host, "/foo/.*", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/foo/.*", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, func(server string) bool { return strings.Contains(server, fmt.Sprintf("server_name %s ;", host)) }) + time.Sleep(waitForLuaSync) resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL+"/foo/bar"). + Get(f.GetURL(framework.HTTP)+"/foo/bar"). Set("Host", host). 
End() @@ -257,23 +307,24 @@ var _ = framework.IngressNginxDescribe("Annotations - Affinity/Sticky Sessions", }) It("should warn user when use-regex is true and session-cookie-path is not set", func() { - host := "cookie.foo.com" + host := "useregexwarn.foo.com" annotations := map[string]string{ "nginx.ingress.kubernetes.io/affinity": "cookie", "nginx.ingress.kubernetes.io/session-cookie-name": "SERVERID", "nginx.ingress.kubernetes.io/use-regex": "true", } - ing := framework.NewSingleIngress(host, "/foo/.*", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/foo/.*", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, func(server string) bool { return strings.Contains(server, fmt.Sprintf("server_name %s ;", host)) }) + time.Sleep(waitForLuaSync) resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL+"/foo/bar"). + Get(f.GetURL(framework.HTTP)+"/foo/bar"). Set("Host", host). 
End() @@ -286,24 +337,25 @@ var _ = framework.IngressNginxDescribe("Annotations - Affinity/Sticky Sessions", }) It("should not set affinity across all server locations when using separate ingresses", func() { - host := "cookie.foo.com" + host := "separate.foo.com" annotations := map[string]string{ "nginx.ingress.kubernetes.io/affinity": "cookie", } - ing1 := framework.NewSingleIngress("ingress1", "/foo/bar", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing1 := framework.NewSingleIngress("ingress1", "/foo/bar", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing1) - ing2 := framework.NewSingleIngress("ingress2", "/foo", host, f.IngressController.Namespace, "http-svc", 80, &map[string]string{}) + ing2 := framework.NewSingleIngress("ingress2", "/foo", host, f.Namespace, "http-svc", 80, &map[string]string{}) f.EnsureIngress(ing2) f.WaitForNginxServer(host, func(server string) bool { return strings.Contains(server, `location /foo/bar`) && strings.Contains(server, `location /foo`) }) + time.Sleep(waitForLuaSync) resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL+"/foo"). + Get(f.GetURL(framework.HTTP)+"/foo"). Set("Host", host). End() @@ -312,7 +364,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Affinity/Sticky Sessions", Expect(resp.Header.Get("Set-Cookie")).Should(Equal("")) resp, _, errs = gorequest.New(). - Get(f.IngressController.HTTPURL+"/foo/bar"). + Get(f.GetURL(framework.HTTP)+"/foo/bar"). Set("Host", host). 
End() diff --git a/test/e2e/annotations/alias.go b/test/e2e/annotations/alias.go index f15db12531..96d281a300 100644 --- a/test/e2e/annotations/alias.go +++ b/test/e2e/annotations/alias.go @@ -41,7 +41,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Alias", func() { host := "foo" annotations := map[string]string{} - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -50,7 +50,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Alias", func() { }) resp, body, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). End() @@ -59,7 +59,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Alias", func() { Expect(body).Should(ContainSubstring(fmt.Sprintf("host=%v", host))) resp, body, errs = gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", "bar"). End() @@ -74,7 +74,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Alias", func() { "nginx.ingress.kubernetes.io/server-alias": "bar", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -85,7 +85,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Alias", func() { hosts := []string{"foo", "bar"} for _, host := range hosts { resp, body, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). 
End() diff --git a/test/e2e/annotations/approot.go b/test/e2e/annotations/approot.go index 9a325202d2..8d5621b376 100644 --- a/test/e2e/annotations/approot.go +++ b/test/e2e/annotations/approot.go @@ -43,7 +43,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Approot", func() { "nginx.ingress.kubernetes.io/app-root": "/foo", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -53,7 +53,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Approot", func() { }) resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Retry(10, 1*time.Second, http.StatusNotFound). RedirectPolicy(noRedirectPolicyFunc). Set("Host", host). diff --git a/test/e2e/annotations/auth.go b/test/e2e/annotations/auth.go index 01eb7b6389..6a9dec4b43 100644 --- a/test/e2e/annotations/auth.go +++ b/test/e2e/annotations/auth.go @@ -46,7 +46,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Auth", func() { It("should return status code 200 when no authentication is configured", func() { host := "auth" - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, nil) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, nil) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -55,7 +55,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Auth", func() { }) resp, body, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Retry(10, 1*time.Second, http.StatusNotFound). Set("Host", host). 
End() @@ -73,7 +73,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Auth", func() { "nginx.ingress.kubernetes.io/auth-realm": "test auth", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -82,7 +82,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Auth", func() { }) resp, body, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Retry(10, 1*time.Second, http.StatusNotFound). Set("Host", host). End() @@ -95,7 +95,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Auth", func() { It("should return status code 401 when authentication is configured but Authorization header is not configured", func() { host := "auth" - s := f.EnsureSecret(buildSecret("foo", "bar", "test", f.IngressController.Namespace)) + s := f.EnsureSecret(buildSecret("foo", "bar", "test", f.Namespace)) annotations := map[string]string{ "nginx.ingress.kubernetes.io/auth-type": "basic", @@ -103,7 +103,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Auth", func() { "nginx.ingress.kubernetes.io/auth-realm": "test auth", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -112,7 +112,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Auth", func() { }) resp, body, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Retry(10, 1*time.Second, http.StatusNotFound). Set("Host", host). 
End() @@ -125,7 +125,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Auth", func() { It("should return status code 401 when authentication is configured and Authorization header is sent with invalid credentials", func() { host := "auth" - s := f.EnsureSecret(buildSecret("foo", "bar", "test", f.IngressController.Namespace)) + s := f.EnsureSecret(buildSecret("foo", "bar", "test", f.Namespace)) annotations := map[string]string{ "nginx.ingress.kubernetes.io/auth-type": "basic", @@ -133,7 +133,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Auth", func() { "nginx.ingress.kubernetes.io/auth-realm": "test auth", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -142,7 +142,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Auth", func() { }) resp, body, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Retry(10, 1*time.Second, http.StatusNotFound). Set("Host", host). SetBasicAuth("user", "pass"). 
@@ -156,7 +156,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Auth", func() { It("should return status code 200 when authentication is configured and Authorization header is sent", func() { host := "auth" - s := f.EnsureSecret(buildSecret("foo", "bar", "test", f.IngressController.Namespace)) + s := f.EnsureSecret(buildSecret("foo", "bar", "test", f.Namespace)) annotations := map[string]string{ "nginx.ingress.kubernetes.io/auth-type": "basic", @@ -164,7 +164,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Auth", func() { "nginx.ingress.kubernetes.io/auth-realm": "test auth", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -173,7 +173,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Auth", func() { }) resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Retry(10, 1*time.Second, http.StatusNotFound). Set("Host", host). SetBasicAuth("foo", "bar"). @@ -190,7 +190,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Auth", func() { &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "test", - Namespace: f.IngressController.Namespace, + Namespace: f.Namespace, }, Data: map[string][]byte{ // invalid content @@ -206,7 +206,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Auth", func() { "nginx.ingress.kubernetes.io/auth-realm": "test auth", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -215,7 +215,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Auth", func() { }) resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). 
+ Get(f.GetURL(framework.HTTP)). Retry(10, 1*time.Second, http.StatusNotFound). Set("Host", host). SetBasicAuth("foo", "bar"). @@ -234,7 +234,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Auth", func() { proxy_set_header My-Custom-Header 42;`, } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -251,7 +251,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Auth", func() { proxy_set_header My-Custom-Header 42;`, } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -268,10 +268,10 @@ var _ = framework.IngressNginxDescribe("Annotations - Auth", func() { var httpbinIP string - err := framework.WaitForEndpoints(f.KubeClientSet, framework.DefaultTimeout, "httpbin", f.IngressController.Namespace, 1) + err := framework.WaitForEndpoints(f.KubeClientSet, framework.DefaultTimeout, "httpbin", f.Namespace, 1) Expect(err).NotTo(HaveOccurred()) - e, err := f.KubeClientSet.CoreV1().Endpoints(f.IngressController.Namespace).Get("httpbin", metav1.GetOptions{}) + e, err := f.KubeClientSet.CoreV1().Endpoints(f.Namespace).Get("httpbin", metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) httpbinIP = e.Subsets[0].Addresses[0].IP @@ -281,7 +281,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Auth", func() { "nginx.ingress.kubernetes.io/auth-signin": "http://$host/auth/start", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, 
func(server string) bool { @@ -291,7 +291,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Auth", func() { It("should return status code 200 when signed in", func() { resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Retry(10, 1*time.Second, http.StatusNotFound). Set("Host", host). SetBasicAuth("user", "password"). @@ -305,7 +305,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Auth", func() { It("should redirect to signin url when not signed in", func() { resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Retry(10, 1*time.Second, http.StatusNotFound). Set("Host", host). RedirectPolicy(func(req gorequest.Request, via []gorequest.Request) error { diff --git a/test/e2e/annotations/authtls.go b/test/e2e/annotations/authtls.go index 2c44c34507..e86f251625 100644 --- a/test/e2e/annotations/authtls.go +++ b/test/e2e/annotations/authtls.go @@ -40,7 +40,7 @@ var _ = framework.IngressNginxDescribe("Annotations - AuthTLS", func() { It("should set valid auth-tls-secret", func() { host := "authtls.foo.com" - nameSpace := f.IngressController.Namespace + nameSpace := f.Namespace clientConfig, err := framework.CreateIngressMASecret( f.KubeClientSet, @@ -76,7 +76,7 @@ var _ = framework.IngressNginxDescribe("Annotations - AuthTLS", func() { req := gorequest.New() uri := "/" resp, _, errs := req. - Get(f.IngressController.HTTPSURL+uri). + Get(f.GetURL(framework.HTTPS)+uri). TLSClientConfig(&tls.Config{ServerName: host, InsecureSkipVerify: true}). Set("Host", host). End() @@ -85,7 +85,7 @@ var _ = framework.IngressNginxDescribe("Annotations - AuthTLS", func() { // Send Request Passing the Client Certs resp, _, errs = req. - Get(f.IngressController.HTTPSURL+uri). + Get(f.GetURL(framework.HTTPS)+uri). TLSClientConfig(clientConfig). Set("Host", host). 
End() @@ -95,7 +95,7 @@ var _ = framework.IngressNginxDescribe("Annotations - AuthTLS", func() { It("should set valid auth-tls-secret, sslVerify to off, and sslVerifyDepth to 2", func() { host := "authtls.foo.com" - nameSpace := f.IngressController.Namespace + nameSpace := f.Namespace _, err := framework.CreateIngressMASecret( f.KubeClientSet, @@ -129,7 +129,7 @@ var _ = framework.IngressNginxDescribe("Annotations - AuthTLS", func() { req := gorequest.New() uri := "/" resp, _, errs := req. - Get(f.IngressController.HTTPSURL+uri). + Get(f.GetURL(framework.HTTPS)+uri). TLSClientConfig(&tls.Config{ServerName: host, InsecureSkipVerify: true}). Set("Host", host). End() @@ -139,7 +139,7 @@ var _ = framework.IngressNginxDescribe("Annotations - AuthTLS", func() { It("should set valid auth-tls-secret, pass certificate to upstream, and error page", func() { host := "authtls.foo.com" - nameSpace := f.IngressController.Namespace + nameSpace := f.Namespace errorPath := "/error" @@ -152,7 +152,7 @@ var _ = framework.IngressNginxDescribe("Annotations - AuthTLS", func() { annotations := map[string]string{ "nginx.ingress.kubernetes.io/auth-tls-secret": nameSpace + "/" + host, - "nginx.ingress.kubernetes.io/auth-tls-error-page": f.IngressController.HTTPURL + errorPath, + "nginx.ingress.kubernetes.io/auth-tls-error-page": f.GetURL(framework.HTTP) + errorPath, "nginx.ingress.kubernetes.io/auth-tls-pass-certificate-to-upstream": "true", } @@ -165,7 +165,7 @@ var _ = framework.IngressNginxDescribe("Annotations - AuthTLS", func() { sslVerify := "ssl_verify_client on;" sslVerifyDepth := "ssl_verify_depth 1;" - sslErrorPage := fmt.Sprintf("error_page 495 496 = %s;", f.IngressController.HTTPURL+errorPath) + sslErrorPage := fmt.Sprintf("error_page 495 496 = %s;", f.GetURL(framework.HTTP)+errorPath) sslUpstreamClientCert := "proxy_set_header ssl-client-cert $ssl_client_escaped_cert;" f.WaitForNginxServer(host, @@ -183,18 +183,18 @@ var _ = framework.IngressNginxDescribe("Annotations - 
AuthTLS", func() { req := gorequest.New() uri := "/" resp, _, errs := req. - Get(f.IngressController.HTTPSURL+uri). + Get(f.GetURL(framework.HTTPS)+uri). TLSClientConfig(&tls.Config{ServerName: host, InsecureSkipVerify: true}). Set("Host", host). RedirectPolicy(noRedirectPolicyFunc). End() Expect(errs).Should(BeEmpty()) Expect(resp.StatusCode).Should(Equal(http.StatusFound)) - Expect(resp.Header.Get("Location")).Should(Equal(f.IngressController.HTTPURL + errorPath)) + Expect(resp.Header.Get("Location")).Should(Equal(f.GetURL(framework.HTTP) + errorPath)) // Send Request Passing the Client Certs resp, _, errs = req. - Get(f.IngressController.HTTPSURL+uri). + Get(f.GetURL(framework.HTTPS)+uri). TLSClientConfig(clientConfig). Set("Host", host). End() diff --git a/test/e2e/annotations/backendprotocol.go b/test/e2e/annotations/backendprotocol.go index 1723e3b872..24b57cd48c 100644 --- a/test/e2e/annotations/backendprotocol.go +++ b/test/e2e/annotations/backendprotocol.go @@ -38,7 +38,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Backendprotocol", func() { "nginx.ingress.kubernetes.io/backend-protocol": "HTTPS", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -53,7 +53,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Backendprotocol", func() { "nginx.ingress.kubernetes.io/backend-protocol": "GRPC", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -68,7 +68,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Backendprotocol", func() { "nginx.ingress.kubernetes.io/backend-protocol": "GRPCS", } - ing := 
framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -83,7 +83,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Backendprotocol", func() { "nginx.ingress.kubernetes.io/backend-protocol": "AJP", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, diff --git a/test/e2e/annotations/canary.go b/test/e2e/annotations/canary.go index 67e3d7e02c..1c9aa9bac9 100644 --- a/test/e2e/annotations/canary.go +++ b/test/e2e/annotations/canary.go @@ -48,7 +48,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { host := "foo" annotations := map[string]string{} - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -63,14 +63,14 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { canaryIngName := fmt.Sprintf("%v-canary", host) - canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.IngressController.Namespace, "http-svc-canary", + canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, "http-svc-canary", 80, &canaryAnnotations) f.EnsureIngress(canaryIng) time.Sleep(waitForLuaSync) resp, body, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). 
End() @@ -90,7 +90,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { canaryIngName := fmt.Sprintf("%v-canary", host) - canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.IngressController.Namespace, "http-svc-canary", + canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, "http-svc-canary", 80, &canaryAnnotations) f.EnsureIngress(canaryIng) @@ -98,7 +98,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { time.Sleep(waitForLuaSync) resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). Set("CanaryByHeader", "always"). End() @@ -115,7 +115,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { host := "foo" annotations := map[string]string{} - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -130,7 +130,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { canaryIngName := fmt.Sprintf("%v-canary", host) - canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.IngressController.Namespace, "http-svc-canary", + canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, "http-svc-canary", 80, &canaryAnnotations) f.EnsureIngress(canaryIng) @@ -141,7 +141,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { f.NewEchoDeploymentWithReplicas(0) resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). Set("CanaryByHeader", "always"). 
End() @@ -155,7 +155,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { f.NewDeployment("http-svc-canary", "gcr.io/kubernetes-e2e-test-images/echoserver:2.2", 8080, 0) resp, _, errs = gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). Set("CanaryByHeader", "never"). End() @@ -169,7 +169,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { host := "foo" annotations := map[string]string{} - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -184,7 +184,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { canaryIngName := fmt.Sprintf("%v-canary", host) - canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.IngressController.Namespace, "http-svc-canary", + canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, "http-svc-canary", 80, &canaryAnnotations) f.EnsureIngress(canaryIng) @@ -192,7 +192,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { By("routing requests destined for the mainline ingress to the maineline upstream") resp, body, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). Set("CanaryByHeader", "never"). End() @@ -205,7 +205,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { By("routing requests destined for the canary ingress to the canary upstream") resp, body, errs = gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). Set("CanaryByHeader", "always"). 
End() @@ -225,7 +225,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { canaryIngName := fmt.Sprintf("%v-canary", host) - canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.IngressController.Namespace, "http-svc-canary", + canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, "http-svc-canary", 80, &canaryAnnotations) f.EnsureIngress(canaryIng) @@ -233,7 +233,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { annotations := map[string]string{} - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -243,7 +243,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { By("routing requests destined for the mainline ingress to the mainelin upstream") resp, body, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). Set("CanaryByHeader", "never"). End() @@ -256,7 +256,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { By("routing requests destined for the canary ingress to the canary upstream") resp, body, errs = gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). Set("CanaryByHeader", "always"). 
End() @@ -270,7 +270,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { host := "foo" annotations := map[string]string{} - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -285,7 +285,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { canaryIngName := fmt.Sprintf("%v-canary", host) - canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.IngressController.Namespace, "http-svc-canary", + canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, "http-svc-canary", 80, &canaryAnnotations) f.EnsureIngress(canaryIng) @@ -295,7 +295,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { "foo": "bar", } - modIng := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &modAnnotations) + modIng := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &modAnnotations) f.EnsureIngress(modIng) @@ -307,7 +307,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { By("routing requests destined fro the mainline ingress to the mainline upstream") resp, body, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). Set("CanaryByHeader", "never"). End() @@ -320,7 +320,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { By("routing requests destined for the canary ingress to the canary upstream") resp, body, errs = gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). Set("CanaryByHeader", "always"). 
End() @@ -334,7 +334,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { host := "foo" annotations := map[string]string{} - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -349,7 +349,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { canaryIngName := fmt.Sprintf("%v-canary", host) - canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.IngressController.Namespace, "http-svc-canary", + canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, "http-svc-canary", 80, &canaryAnnotations) f.EnsureIngress(canaryIng) @@ -360,7 +360,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { "nginx.ingress.kubernetes.io/canary-by-header": "CanaryByHeader2", } - modCanaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.IngressController.Namespace, "http-svc-canary", 80, &modCanaryAnnotations) + modCanaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, "http-svc-canary", 80, &modCanaryAnnotations) f.EnsureIngress(modCanaryIng) time.Sleep(waitForLuaSync) @@ -368,7 +368,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { By("routing requests destined for the mainline ingress to the mainline upstream") resp, body, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). Set("CanaryByHeader2", "never"). End() @@ -381,7 +381,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { By("routing requests destined for the canary ingress to the canary upstream") resp, body, errs = gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). Set("CanaryByHeader2", "always"). 
End() @@ -392,12 +392,12 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { }) }) - Context("when canaried by header", func() { + Context("when canaried by header with no value", func() { It("should route requests to the correct upstream", func() { host := "foo" annotations := map[string]string{} - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -412,7 +412,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { canaryIngName := fmt.Sprintf("%v-canary", host) - canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.IngressController.Namespace, "http-svc-canary", + canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, "http-svc-canary", 80, &canaryAnnotations) f.EnsureIngress(canaryIng) @@ -421,7 +421,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { By("routing requests to the canary upstream when header is set to 'always'") resp, body, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). Set("CanaryByHeader", "always"). End() @@ -433,7 +433,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { By("routing requests to the mainline upstream when header is set to 'never'") resp, body, errs = gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). Set("CanaryByHeader", "never"). End() @@ -446,7 +446,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { By("routing requests to the mainline upstream when header is set to anything else") resp, body, errs = gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). Set("CanaryByHeader", "badheadervalue"). 
End() @@ -458,12 +458,134 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { }) }) + Context("when canaried by header with value", func() { + It("should route requests to the correct upstream", func() { + host := "foo" + annotations := map[string]string{} + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("server_name foo")) + }) + + canaryAnnotations := map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-by-header": "CanaryByHeader", + "nginx.ingress.kubernetes.io/canary-by-header-value": "DoCanary", + } + + canaryIngName := fmt.Sprintf("%v-canary", host) + + canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, "http-svc-canary", + 80, &canaryAnnotations) + f.EnsureIngress(canaryIng) + + time.Sleep(waitForLuaSync) + + By("routing requests to the canary upstream when header is set to 'DoCanary'") + + resp, body, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + Set("CanaryByHeader", "DoCanary"). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring("http-svc-canary")) + + By("routing requests to the mainline upstream when header is set to 'always'") + + resp, body, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + Set("CanaryByHeader", "always"). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring("http-svc")) + Expect(body).ShouldNot(ContainSubstring("http-svc-canary")) + + By("routing requests to the mainline upstream when header is set to 'never'") + + resp, body, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). 
+ Set("CanaryByHeader", "never"). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring("http-svc")) + Expect(body).ShouldNot(ContainSubstring("http-svc-canary")) + + By("routing requests to the mainline upstream when header is set to anything else") + + resp, body, errs = gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + Set("CanaryByHeader", "otherheadervalue"). + End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring("http-svc")) + Expect(body).ShouldNot(ContainSubstring("http-svc-canary")) + }) + }) + + Context("when canaried by header with value and cookie", func() { + It("should route requests to the correct upstream", func() { + host := "foo" + annotations := map[string]string{} + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring("server_name foo")) + }) + + canaryAnnotations := map[string]string{ + "nginx.ingress.kubernetes.io/canary": "true", + "nginx.ingress.kubernetes.io/canary-by-header": "CanaryByHeader", + "nginx.ingress.kubernetes.io/canary-by-header-value": "DoCanary", + "nginx.ingress.kubernetes.io/canary-by-cookie": "CanaryByCookie", + } + + canaryIngName := fmt.Sprintf("%v-canary", host) + + canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, "http-svc-canary", + 80, &canaryAnnotations) + f.EnsureIngress(canaryIng) + + time.Sleep(waitForLuaSync) + + By("routing requests to the canary upstream when header value does not match and cookie is set to 'always'") + resp, body, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + Set("CanaryByHeader", "otherheadervalue"). + AddCookie(&http.Cookie{Name: "CanaryByCookie", Value: "always"}). 
+ End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring("http-svc-canary")) + }) + }) + Context("when canaried by cookie", func() { It("should route requests to the correct upstream", func() { host := "foo" annotations := map[string]string{} - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -478,7 +600,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { canaryIngName := fmt.Sprintf("%v-canary", host) - canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.IngressController.Namespace, "http-svc-canary", + canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, "http-svc-canary", 80, &canaryAnnotations) f.EnsureIngress(canaryIng) @@ -486,7 +608,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { By("routing requests to the canary upstream when cookie is set to 'always'") resp, body, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). AddCookie(&http.Cookie{Name: "Canary-By-Cookie", Value: "always"}). End() @@ -498,7 +620,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { By("routing requests to the mainline upstream when cookie is set to 'never'") resp, body, errs = gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). AddCookie(&http.Cookie{Name: "Canary-By-Cookie", Value: "never"}). End() @@ -511,7 +633,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { By("routing requests to the mainline upstream when cookie is set to anything else") resp, body, errs = gorequest.New(). - Get(f.IngressController.HTTPURL). 
+ Get(f.GetURL(framework.HTTP)). Set("Host", host). AddCookie(&http.Cookie{Name: "Canary-By-Cookie", Value: "badcookievalue"}). End() @@ -529,7 +651,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { host := "foo" annotations := map[string]string{} - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -544,7 +666,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { canaryIngName := fmt.Sprintf("%v-canary", host) - canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.IngressController.Namespace, "http-svc-canary", + canaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, "http-svc-canary", 80, &canaryAnnotations) f.EnsureIngress(canaryIng) @@ -553,7 +675,7 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { By("returning requests from the mainline only when weight is equal to 0") resp, body, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). End() @@ -569,14 +691,14 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { "nginx.ingress.kubernetes.io/canary-weight": "100", } - modCanaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.IngressController.Namespace, "http-svc-canary", 80, &modCanaryAnnotations) + modCanaryIng := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, "http-svc-canary", 80, &modCanaryAnnotations) f.EnsureIngress(modCanaryIng) time.Sleep(waitForLuaSync) resp, body, errs = gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). 
End() @@ -596,16 +718,16 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { "nginx.ingress.kubernetes.io/canary-by-header": "CanaryByHeader", } - ing := framework.NewSingleCatchAllIngress(canaryIngName, f.IngressController.Namespace, "http-svc-canary", 80, &annotations) + ing := framework.NewSingleCatchAllIngress(canaryIngName, f.Namespace, "http-svc-canary", 80, &annotations) f.EnsureIngress(ing) - ing = framework.NewSingleCatchAllIngress(host, f.IngressController.Namespace, "http-svc", 80, nil) + ing = framework.NewSingleCatchAllIngress(host, f.Namespace, "http-svc", 80, nil) f.EnsureIngress(ing) f.WaitForNginxServer("_", func(server string) bool { - upstreamName := fmt.Sprintf(`set $proxy_upstream_name "%s-%s-%s";`, f.IngressController.Namespace, "http-svc", "80") - canaryUpstreamName := fmt.Sprintf(`set $proxy_upstream_name "%s-%s-%s";`, f.IngressController.Namespace, "http-svc-canary", "80") + upstreamName := fmt.Sprintf(`set $proxy_upstream_name "%s-%s-%s";`, f.Namespace, "http-svc", "80") + canaryUpstreamName := fmt.Sprintf(`set $proxy_upstream_name "%s-%s-%s";`, f.Namespace, "http-svc-canary", "80") return Expect(server).Should(ContainSubstring(`set $ingress_name "`+host+`";`)) && Expect(server).ShouldNot(ContainSubstring(`set $proxy_upstream_name "upstream-default-backend";`)) && Expect(server).ShouldNot(ContainSubstring(canaryUpstreamName)) && @@ -621,11 +743,11 @@ var _ = framework.IngressNginxDescribe("Annotations - canary", func() { "nginx.ingress.kubernetes.io/canary-by-header": "CanaryByHeader", } - ing := framework.NewSingleIngress(canaryIngName, "/", host, f.IngressController.Namespace, "http-svc-canary", 80, &annotations) + ing := framework.NewSingleIngress(canaryIngName, "/", host, f.Namespace, "http-svc-canary", 80, &annotations) f.EnsureIngress(ing) otherHost := "bar" - ing = framework.NewSingleIngress(otherHost, "/", otherHost, f.IngressController.Namespace, "http-svc", 80, nil) + ing = 
framework.NewSingleIngress(otherHost, "/", otherHost, f.Namespace, "http-svc", 80, nil) f.EnsureIngress(ing) time.Sleep(waitForLuaSync) diff --git a/test/e2e/annotations/clientbodybuffersize.go b/test/e2e/annotations/clientbodybuffersize.go index 621deb65d3..cfaa664e14 100644 --- a/test/e2e/annotations/clientbodybuffersize.go +++ b/test/e2e/annotations/clientbodybuffersize.go @@ -38,7 +38,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Client-Body-Buffer-Size", "nginx.ingress.kubernetes.io/client-body-buffer-size": "1000", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -53,7 +53,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Client-Body-Buffer-Size", "nginx.ingress.kubernetes.io/client-body-buffer-size": "1K", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -68,7 +68,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Client-Body-Buffer-Size", "nginx.ingress.kubernetes.io/client-body-buffer-size": "1k", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -83,7 +83,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Client-Body-Buffer-Size", "nginx.ingress.kubernetes.io/client-body-buffer-size": "1m", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, 
&annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -98,7 +98,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Client-Body-Buffer-Size", "nginx.ingress.kubernetes.io/client-body-buffer-size": "1M", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -113,7 +113,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Client-Body-Buffer-Size", "nginx.ingress.kubernetes.io/client-body-buffer-size": "1b", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, diff --git a/test/e2e/annotations/connection.go b/test/e2e/annotations/connection.go index 6ca77cce78..5becd22ac9 100644 --- a/test/e2e/annotations/connection.go +++ b/test/e2e/annotations/connection.go @@ -43,7 +43,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Connection", func() { "nginx.ingress.kubernetes.io/connection-proxy-header": "keep-alive", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -52,7 +52,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Connection", func() { }) resp, body, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Retry(10, 1*time.Second, http.StatusNotFound). Set("Host", host). 
End() diff --git a/test/e2e/annotations/cors.go b/test/e2e/annotations/cors.go index 4fb4c9d4fc..ec1c5560e5 100644 --- a/test/e2e/annotations/cors.go +++ b/test/e2e/annotations/cors.go @@ -42,7 +42,7 @@ var _ = framework.IngressNginxDescribe("Annotations - CORS", func() { "nginx.ingress.kubernetes.io/enable-cors": "true", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -72,7 +72,7 @@ var _ = framework.IngressNginxDescribe("Annotations - CORS", func() { uri := "/" resp, _, errs := gorequest.New(). - Options(f.IngressController.HTTPURL+uri). + Options(f.GetURL(framework.HTTP)+uri). Set("Host", host). End() Expect(errs).Should(BeEmpty()) @@ -86,7 +86,7 @@ var _ = framework.IngressNginxDescribe("Annotations - CORS", func() { "nginx.ingress.kubernetes.io/cors-allow-methods": "POST, GET", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -102,7 +102,7 @@ var _ = framework.IngressNginxDescribe("Annotations - CORS", func() { "nginx.ingress.kubernetes.io/cors-max-age": "200", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -118,7 +118,7 @@ var _ = framework.IngressNginxDescribe("Annotations - CORS", func() { "nginx.ingress.kubernetes.io/cors-allow-credentials": "false", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, 
f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -134,7 +134,7 @@ var _ = framework.IngressNginxDescribe("Annotations - CORS", func() { "nginx.ingress.kubernetes.io/cors-allow-origin": "https://origin.cors.com:8080", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -150,7 +150,7 @@ var _ = framework.IngressNginxDescribe("Annotations - CORS", func() { "nginx.ingress.kubernetes.io/cors-allow-headers": "DNT, User-Agent", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, diff --git a/test/e2e/annotations/customhttperrors.go b/test/e2e/annotations/customhttperrors.go index c74b629854..0a623fe923 100644 --- a/test/e2e/annotations/customhttperrors.go +++ b/test/e2e/annotations/customhttperrors.go @@ -22,20 +22,26 @@ import ( . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" + extensions "k8s.io/api/extensions/v1beta1" + "k8s.io/ingress-nginx/test/e2e/framework" ) +func errorBlockName(upstreamName string, errorCode string) string { + return fmt.Sprintf("@custom_%s_%s", upstreamName, errorCode) +} + var _ = framework.IngressNginxDescribe("Annotations - custom-http-errors", func() { f := framework.NewDefaultFramework("custom-http-errors") BeforeEach(func() { - f.NewEchoDeploymentWithReplicas(2) + f.NewEchoDeploymentWithReplicas(1) }) AfterEach(func() { }) - It("should set proxy_intercept_errors", func() { + It("configures Nginx correctly", func() { host := "customerrors.foo.com" errorCodes := []string{"404", "500"} @@ -44,76 +50,73 @@ var _ = framework.IngressNginxDescribe("Annotations - custom-http-errors", func( "nginx.ingress.kubernetes.io/custom-http-errors": strings.Join(errorCodes, ","), } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) - f.WaitForNginxServer(host, - func(server string) bool { - return Expect(server).Should(ContainSubstring("proxy_intercept_errors on;")) - }) - }) + var serverConfig string + f.WaitForNginxServer(host, func(sc string) bool { + serverConfig = sc + return strings.Contains(serverConfig, fmt.Sprintf("server_name %s", host)) + }) - It("should create error routes", func() { - host := "customerrors.foo.com" - errorCodes := []string{"404", "500"} - - annotations := map[string]string{ - "nginx.ingress.kubernetes.io/custom-http-errors": strings.Join(errorCodes, ","), - } - - ing := framework.NewSingleIngress(host, "/test", host, f.IngressController.Namespace, "http-svc", 80, &annotations) - f.EnsureIngress(ing) + By("turning on proxy_intercept_errors directive") + Expect(serverConfig).Should(ContainSubstring("proxy_intercept_errors on;")) + By("configuring error_page directive") for _, code := 
range errorCodes { - f.WaitForNginxServer(host, - func(server string) bool { - return Expect(server).Should(ContainSubstring(fmt.Sprintf("@custom_%s", code))) - }) + Expect(serverConfig).Should(ContainSubstring(fmt.Sprintf("error_page %s = %s", code, errorBlockName("upstream-default-backend", code)))) } - }) - - It("should set up error_page routing", func() { - host := "customerrors.foo.com" - errorCodes := []string{"404", "500"} - - annotations := map[string]string{ - "nginx.ingress.kubernetes.io/custom-http-errors": strings.Join(errorCodes, ","), - } - - ing := framework.NewSingleIngress(host, "/test", host, f.IngressController.Namespace, "http-svc", 80, &annotations) - f.EnsureIngress(ing) + By("creating error locations") for _, code := range errorCodes { - f.WaitForNginxServer(host, - func(server string) bool { - return Expect(server).Should(ContainSubstring(fmt.Sprintf("error_page %s = @custom_%s", code, code))) - }) + Expect(serverConfig).Should(ContainSubstring(fmt.Sprintf("location %s", errorBlockName("upstream-default-backend", code)))) } - }) - It("should create only one of each error route", func() { - host := "customerrors.foo.com" - errorCodes := [][]string{{"404", "500"}, {"400", "404"}} - - for i, codeSet := range errorCodes { - annotations := map[string]string{ - "nginx.ingress.kubernetes.io/custom-http-errors": strings.Join(codeSet, ","), + By("updating configuration when only custom-http-error value changes") + err := framework.UpdateIngress(f.KubeClientSet, f.Namespace, host, func(ingress *extensions.Ingress) error { + ingress.ObjectMeta.Annotations["nginx.ingress.kubernetes.io/custom-http-errors"] = "503" + return nil + }) + Expect(err).ToNot(HaveOccurred()) + f.WaitForNginxServer(host, func(sc string) bool { + if serverConfig != sc { + serverConfig = sc + return true } - - ing := framework.NewSingleIngress( - fmt.Sprintf("%s-%d", host, i), fmt.Sprintf("/test-%d", i), - host, f.IngressController.Namespace, "http-svc", 80, &annotations) - 
f.EnsureIngress(ing) - } - - for _, codeSet := range errorCodes { - for _, code := range codeSet { - f.WaitForNginxServer(host, - func(server string) bool { - count := strings.Count(server, fmt.Sprintf("location @custom_%s", code)) - return Expect(count).Should(Equal(1)) - }) + return false + }) + Expect(serverConfig).Should(ContainSubstring(fmt.Sprintf("location %s", errorBlockName("upstream-default-backend", "503")))) + Expect(serverConfig).ShouldNot(ContainSubstring(fmt.Sprintf("location %s", errorBlockName("upstream-default-backend", "404")))) + Expect(serverConfig).ShouldNot(ContainSubstring(fmt.Sprintf("location %s", errorBlockName("upstream-default-backend", "500")))) + + By("ignoring duplicate values (503 in this case) per server") + annotations["nginx.ingress.kubernetes.io/custom-http-errors"] = "404, 503" + ing = framework.NewSingleIngress(fmt.Sprintf("%s-else", host), "/else", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + f.WaitForNginxServer(host, func(sc string) bool { + serverConfig = sc + return strings.Contains(serverConfig, "location /else") + }) + count := strings.Count(serverConfig, fmt.Sprintf("location %s", errorBlockName("upstream-default-backend", "503"))) + Expect(count).Should(Equal(1)) + + By("using the custom default-backend from annotation for upstream") + customDefaultBackend := "from-annotation" + f.NewEchoDeploymentWithNameAndReplicas(customDefaultBackend, 1) + + err = framework.UpdateIngress(f.KubeClientSet, f.Namespace, host, func(ingress *extensions.Ingress) error { + ingress.ObjectMeta.Annotations["nginx.ingress.kubernetes.io/default-backend"] = customDefaultBackend + return nil + }) + Expect(err).ToNot(HaveOccurred()) + f.WaitForNginxServer(host, func(sc string) bool { + if serverConfig != sc { + serverConfig = sc + return true } - } + return false + }) + Expect(serverConfig).Should(ContainSubstring(errorBlockName(fmt.Sprintf("custom-default-backend-%s", customDefaultBackend), "503"))) + 
Expect(serverConfig).Should(ContainSubstring(fmt.Sprintf("error_page %s = %s", "503", errorBlockName(fmt.Sprintf("custom-default-backend-%s", customDefaultBackend), "503")))) }) }) diff --git a/test/e2e/annotations/default_backend.go b/test/e2e/annotations/default_backend.go index 0d105c3862..d2806e0da0 100644 --- a/test/e2e/annotations/default_backend.go +++ b/test/e2e/annotations/default_backend.go @@ -42,7 +42,7 @@ var _ = framework.IngressNginxDescribe("Annotations - custom default-backend", f "nginx.ingress.kubernetes.io/default-backend": "http-svc", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "invalid", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "invalid", 80, &annotations) f.EnsureIngress(ing) time.Sleep(5 * time.Second) @@ -54,7 +54,7 @@ var _ = framework.IngressNginxDescribe("Annotations - custom default-backend", f uri := "/alma/armud" resp, body, errs := gorequest.New(). - Get(f.IngressController.HTTPURL+uri). + Get(f.GetURL(framework.HTTP)+uri). Set("Host", host). 
End() diff --git a/test/e2e/annotations/forcesslredirect.go b/test/e2e/annotations/forcesslredirect.go index e46eb6465f..759e5392c0 100644 --- a/test/e2e/annotations/forcesslredirect.go +++ b/test/e2e/annotations/forcesslredirect.go @@ -43,17 +43,17 @@ var _ = framework.IngressNginxDescribe("Annotations - Forcesslredirect", func() "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, func(server string) bool { return Expect(server).Should(ContainSubstring(`if ($redirect_to_https) {`)) && - Expect(server).Should(ContainSubstring(`return 308 https://$best_http_host$request_uri;`)) + Expect(server).Should(ContainSubstring(`return 308 https://$redirect_host$request_uri;`)) }) resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Retry(10, 1*time.Second, http.StatusNotFound). RedirectPolicy(noRedirectPolicyFunc). Set("Host", host). diff --git a/test/e2e/annotations/fromtowwwredirect.go b/test/e2e/annotations/fromtowwwredirect.go index 4aaa5909bc..d415549d1c 100644 --- a/test/e2e/annotations/fromtowwwredirect.go +++ b/test/e2e/annotations/fromtowwwredirect.go @@ -47,7 +47,7 @@ var _ = framework.IngressNginxDescribe("Annotations - from-to-www-redirect", fun "nginx.ingress.kubernetes.io/from-to-www-redirect": "true", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxConfiguration( @@ -59,7 +59,7 @@ var _ = framework.IngressNginxDescribe("Annotations - from-to-www-redirect", fun By("sending request to www.fromtowwwredirect.bar.com") resp, _, errs := gorequest.New(). 
- Get(fmt.Sprintf("%s/%s", f.IngressController.HTTPURL, "foo")). + Get(fmt.Sprintf("%s/%s", f.GetURL(framework.HTTP), "foo")). Retry(10, 1*time.Second, http.StatusNotFound). RedirectPolicy(noRedirectPolicyFunc). Set("Host", fmt.Sprintf("%s.%s", "www", host)). @@ -78,7 +78,7 @@ var _ = framework.IngressNginxDescribe("Annotations - from-to-www-redirect", fun "nginx.ingress.kubernetes.io/from-to-www-redirect": "true", } - ing := framework.NewSingleIngressWithTLS(host, "/", host, []string{host, fmt.Sprintf("www.%v", host)}, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngressWithTLS(host, "/", host, []string{host, fmt.Sprintf("www.%v", host)}, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) _, err := framework.CreateIngressTLSSecret(f.KubeClientSet, @@ -90,7 +90,7 @@ var _ = framework.IngressNginxDescribe("Annotations - from-to-www-redirect", fun f.WaitForNginxServer(fmt.Sprintf("www.%v", host), func(server string) bool { return Expect(server).Should(ContainSubstring(`server_name www.fromtowwwredirect.bar.com;`)) && - Expect(server).Should(ContainSubstring(fmt.Sprintf("/etc/ingress-controller/ssl/%v-fromtowwwredirect.bar.com.pem", f.IngressController.Namespace))) && + Expect(server).Should(ContainSubstring(fmt.Sprintf("/etc/ingress-controller/ssl/%v-fromtowwwredirect.bar.com.pem", f.Namespace))) && Expect(server).Should(ContainSubstring(`return 308 $scheme://fromtowwwredirect.bar.com$request_uri;`)) }) @@ -103,7 +103,7 @@ var _ = framework.IngressNginxDescribe("Annotations - from-to-www-redirect", fun InsecureSkipVerify: true, ServerName: h, }). - Get(f.IngressController.HTTPSURL). + Get(f.GetURL(framework.HTTPS)). Retry(10, 1*time.Second, http.StatusNotFound). RedirectPolicy(noRedirectPolicyFunc). Set("host", h). 
diff --git a/test/e2e/annotations/grpc.go b/test/e2e/annotations/grpc.go index b3c317cab4..bb3c894afa 100644 --- a/test/e2e/annotations/grpc.go +++ b/test/e2e/annotations/grpc.go @@ -40,7 +40,7 @@ var _ = framework.IngressNginxDescribe("Annotations - grpc", func() { "nginx.ingress.kubernetes.io/backend-protocol": "GRPC", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "fortune-teller", 50051, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "fortune-teller", 50051, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, diff --git a/test/e2e/annotations/http2pushpreload.go b/test/e2e/annotations/http2pushpreload.go index 66211e77fb..5ba7212b1e 100644 --- a/test/e2e/annotations/http2pushpreload.go +++ b/test/e2e/annotations/http2pushpreload.go @@ -38,7 +38,7 @@ var _ = framework.IngressNginxDescribe("Annotations - HTTP2 Push Preload", func( "nginx.ingress.kubernetes.io/http2-push-preload": "true", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, diff --git a/test/e2e/annotations/influxdb.go b/test/e2e/annotations/influxdb.go index efd572c311..23917b6b8c 100644 --- a/test/e2e/annotations/influxdb.go +++ b/test/e2e/annotations/influxdb.go @@ -67,7 +67,7 @@ var _ = framework.IngressNginxDescribe("Annotations - influxdb", func() { // Do a request to the echo server ingress that sends metrics // to the InfluxDB backend. res, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). 
End() @@ -103,7 +103,7 @@ func createInfluxDBService(f *framework.Framework) *corev1.Service { service := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "inflxudb-svc", - Namespace: f.IngressController.Namespace, + Namespace: f.Namespace, }, Spec: corev1.ServiceSpec{Ports: []corev1.ServicePort{ { @@ -123,7 +123,7 @@ func createInfluxDBService(f *framework.Framework) *corev1.Service { } func createInfluxDBIngress(f *framework.Framework, host, service string, port int, annotations map[string]string) { - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, service, port, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, service, port, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -133,7 +133,7 @@ func createInfluxDBIngress(f *framework.Framework, host, service string, port in } func extractInfluxDBMeasurements(f *framework.Framework) (string, error) { - l, err := f.KubeClientSet.CoreV1().Pods(f.IngressController.Namespace).List(metav1.ListOptions{ + l, err := f.KubeClientSet.CoreV1().Pods(f.Namespace).List(metav1.ListOptions{ LabelSelector: "app=influxdb-svc", }) if err != nil { diff --git a/test/e2e/annotations/ipwhitelist.go b/test/e2e/annotations/ipwhitelist.go index 79125cff80..321ca236da 100644 --- a/test/e2e/annotations/ipwhitelist.go +++ b/test/e2e/annotations/ipwhitelist.go @@ -37,7 +37,7 @@ var _ = framework.IngressNginxDescribe("Annotations - IPWhiteList", func() { It("should set valid ip whitelist range", func() { host := "ipwhitelist.foo.com" - nameSpace := f.IngressController.Namespace + nameSpace := f.Namespace annotations := map[string]string{ "nginx.ingress.kubernetes.io/whitelist-source-range": "18.0.0.0/8, 56.0.0.0/8", diff --git a/test/e2e/annotations/log.go b/test/e2e/annotations/log.go index 9ae77ad519..068f953bd6 100644 --- a/test/e2e/annotations/log.go +++ b/test/e2e/annotations/log.go @@ -39,7 +39,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Log", 
func() { "nginx.ingress.kubernetes.io/enable-access-log": "false", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -54,7 +54,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Log", func() { "nginx.ingress.kubernetes.io/enable-rewrite-log": "true", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, diff --git a/test/e2e/annotations/luarestywaf.go b/test/e2e/annotations/luarestywaf.go index b4c2dcbdd6..aaeda0c59f 100644 --- a/test/e2e/annotations/luarestywaf.go +++ b/test/e2e/annotations/luarestywaf.go @@ -40,7 +40,7 @@ var _ = framework.IngressNginxDescribe("Annotations - lua-resty-waf", func() { host := "foo" createIngress(f, host, "http-svc", 80, map[string]string{"nginx.ingress.kubernetes.io/lua-resty-waf": "active"}) - url := fmt.Sprintf("%s?msg=XSS", f.IngressController.HTTPURL) + url := fmt.Sprintf("%s?msg=XSS", f.GetURL(framework.HTTP)) resp, _, errs := gorequest.New(). Get(url). Set("Host", host). @@ -55,7 +55,7 @@ var _ = framework.IngressNginxDescribe("Annotations - lua-resty-waf", func() { "nginx.ingress.kubernetes.io/lua-resty-waf": "active", "nginx.ingress.kubernetes.io/lua-resty-waf-ignore-rulesets": "41000_sqli, 42000_xss"}) - url := fmt.Sprintf("%s?msg=XSS", f.IngressController.HTTPURL) + url := fmt.Sprintf("%s?msg=XSS", f.GetURL(framework.HTTP)) resp, _, errs := gorequest.New(). Get(url). Set("Host", host). 
@@ -70,7 +70,7 @@ var _ = framework.IngressNginxDescribe("Annotations - lua-resty-waf", func() { "nginx.ingress.kubernetes.io/lua-resty-waf": "active", "nginx.ingress.kubernetes.io/lua-resty-waf-score-threshold": "20"}) - url := fmt.Sprintf("%s?msg=XSS", f.IngressController.HTTPURL) + url := fmt.Sprintf("%s?msg=XSS", f.GetURL(framework.HTTP)) resp, _, errs := gorequest.New(). Get(url). Set("Host", host). @@ -86,7 +86,7 @@ var _ = framework.IngressNginxDescribe("Annotations - lua-resty-waf", func() { "nginx.ingress.kubernetes.io/lua-resty-waf-allow-unknown-content-types": "true", "nginx.ingress.kubernetes.io/lua-resty-waf": "active"}) - url := fmt.Sprintf("%s?msg=my-message", f.IngressController.HTTPURL) + url := fmt.Sprintf("%s?msg=my-message", f.GetURL(framework.HTTP)) resp, _, errs := gorequest.New(). Get(url). Set("Host", host). @@ -103,7 +103,7 @@ var _ = framework.IngressNginxDescribe("Annotations - lua-resty-waf", func() { "nginx.ingress.kubernetes.io/lua-resty-waf-process-multipart-body": "false", "nginx.ingress.kubernetes.io/lua-resty-waf": "active"}) - url := fmt.Sprintf("%s?msg=my-message", f.IngressController.HTTPURL) + url := fmt.Sprintf("%s?msg=my-message", f.GetURL(framework.HTTP)) resp, _, errs := gorequest.New(). Get(url). Set("Host", host). @@ -119,7 +119,7 @@ var _ = framework.IngressNginxDescribe("Annotations - lua-resty-waf", func() { createIngress(f, host, "http-svc", 80, map[string]string{ "nginx.ingress.kubernetes.io/lua-resty-waf": "active"}) - url := fmt.Sprintf("%s?msg=my-message", f.IngressController.HTTPURL) + url := fmt.Sprintf("%s?msg=my-message", f.GetURL(framework.HTTP)) resp, _, errs := gorequest.New(). Get(url). Set("Host", host). @@ -148,7 +148,7 @@ var _ = framework.IngressNginxDescribe("Annotations - lua-resty-waf", func() { ]=]`, }) - url := fmt.Sprintf("%s?msg=my-message", f.IngressController.HTTPURL) + url := fmt.Sprintf("%s?msg=my-message", f.GetURL(framework.HTTP)) resp, _, errs := gorequest.New(). Get(url). 
Set("Host", host). @@ -157,7 +157,7 @@ var _ = framework.IngressNginxDescribe("Annotations - lua-resty-waf", func() { Expect(len(errs)).Should(Equal(0)) Expect(resp.StatusCode).Should(Equal(http.StatusOK)) - url = fmt.Sprintf("%s?msg=my-foo-message", f.IngressController.HTTPURL) + url = fmt.Sprintf("%s?msg=my-foo-message", f.GetURL(framework.HTTP)) resp, _, errs = gorequest.New(). Get(url). Set("Host", host). @@ -172,7 +172,7 @@ var _ = framework.IngressNginxDescribe("Annotations - lua-resty-waf", func() { host := "foo" createIngress(f, host, "http-svc", 80, map[string]string{}) - url := fmt.Sprintf("%s?msg=XSS", f.IngressController.HTTPURL) + url := fmt.Sprintf("%s?msg=XSS", f.GetURL(framework.HTTP)) resp, _, errs := gorequest.New(). Get(url). Set("Host", host). @@ -185,7 +185,7 @@ var _ = framework.IngressNginxDescribe("Annotations - lua-resty-waf", func() { host := "foo" createIngress(f, host, "http-svc", 80, map[string]string{"nginx.ingress.kubernetes.io/lua-resty-waf": "simulate"}) - url := fmt.Sprintf("%s?msg=XSS", f.IngressController.HTTPURL) + url := fmt.Sprintf("%s?msg=XSS", f.GetURL(framework.HTTP)) resp, _, errs := gorequest.New(). Get(url). Set("Host", host). @@ -203,7 +203,7 @@ var _ = framework.IngressNginxDescribe("Annotations - lua-resty-waf", func() { }) func createIngress(f *framework.Framework, host, service string, port int, annotations map[string]string) { - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, service, port, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, service, port, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -214,7 +214,7 @@ func createIngress(f *framework.Framework, host, service string, port int, annot time.Sleep(1 * time.Second) resp, body, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). 
End() diff --git a/test/e2e/annotations/modsecurity.go b/test/e2e/annotations/modsecurity.go index 0024f4b4c6..60343f644a 100644 --- a/test/e2e/annotations/modsecurity.go +++ b/test/e2e/annotations/modsecurity.go @@ -35,7 +35,7 @@ var _ = framework.IngressNginxDescribe("Annotations - ModSecurityLocation", func It("should enable modsecurity", func() { host := "modsecurity.foo.com" - nameSpace := f.IngressController.Namespace + nameSpace := f.Namespace annotations := map[string]string{ "nginx.ingress.kubernetes.io/enable-modsecurity": "true", @@ -53,7 +53,7 @@ var _ = framework.IngressNginxDescribe("Annotations - ModSecurityLocation", func It("should enable modsecurity with transaction ID and OWASP rules", func() { host := "modsecurity.foo.com" - nameSpace := f.IngressController.Namespace + nameSpace := f.Namespace annotations := map[string]string{ "nginx.ingress.kubernetes.io/enable-modsecurity": "true", @@ -74,7 +74,7 @@ var _ = framework.IngressNginxDescribe("Annotations - ModSecurityLocation", func It("should disable modsecurity", func() { host := "modsecurity.foo.com" - nameSpace := f.IngressController.Namespace + nameSpace := f.Namespace annotations := map[string]string{ "nginx.ingress.kubernetes.io/enable-modsecurity": "false", @@ -91,7 +91,7 @@ var _ = framework.IngressNginxDescribe("Annotations - ModSecurityLocation", func It("should enable modsecurity with snippet", func() { host := "modsecurity.foo.com" - nameSpace := f.IngressController.Namespace + nameSpace := f.Namespace annotations := map[string]string{ "nginx.ingress.kubernetes.io/enable-modsecurity": "true", diff --git a/test/e2e/annotations/proxy.go b/test/e2e/annotations/proxy.go index f6169f4a37..3b5e5205f5 100644 --- a/test/e2e/annotations/proxy.go +++ b/test/e2e/annotations/proxy.go @@ -42,7 +42,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Proxy", func() { "nginx.ingress.kubernetes.io/proxy-redirect-to": "goodbye.com", } - ing := framework.NewSingleIngress(host, "/", host, 
f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -57,7 +57,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Proxy", func() { "nginx.ingress.kubernetes.io/proxy-redirect-to": "goodbye.com", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -72,7 +72,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Proxy", func() { "nginx.ingress.kubernetes.io/proxy-redirect-to": "goodbye.com", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -86,7 +86,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Proxy", func() { "nginx.ingress.kubernetes.io/proxy-body-size": "8m", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -100,7 +100,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Proxy", func() { "nginx.ingress.kubernetes.io/proxy-body-size": "15r", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -116,7 +116,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Proxy", func() { "nginx.ingress.kubernetes.io/proxy-read-timeout": "20", } - ing := 
framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -134,7 +134,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Proxy", func() { "nginx.ingress.kubernetes.io/proxy-read-timeout": "20k", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -147,18 +147,19 @@ var _ = framework.IngressNginxDescribe("Annotations - Proxy", func() { It("should turn on proxy-buffering", func() { annotations := map[string]string{ - "nginx.ingress.kubernetes.io/proxy-buffering": "on", - "nginx.ingress.kubernetes.io/proxy-buffer-size": "8k", + "nginx.ingress.kubernetes.io/proxy-buffering": "on", + "nginx.ingress.kubernetes.io/proxy-buffers-number": "8", + "nginx.ingress.kubernetes.io/proxy-buffer-size": "8k", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, func(server string) bool { return strings.Contains(server, "proxy_buffering on;") && strings.Contains(server, "proxy_buffer_size 8k;") && - strings.Contains(server, "proxy_buffers 4 8k;") && + strings.Contains(server, "proxy_buffers 8 8k;") && strings.Contains(server, "proxy_request_buffering on;") }) }) @@ -168,7 +169,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Proxy", func() { "nginx.ingress.kubernetes.io/proxy-request-buffering": "off", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", 
host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -183,7 +184,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Proxy", func() { "nginx.ingress.kubernetes.io/proxy-next-upstream-tries": "5", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -199,7 +200,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Proxy", func() { "nginx.ingress.kubernetes.io/proxy-cookie-path": "/one/ /", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, diff --git a/test/e2e/annotations/redirect.go b/test/e2e/annotations/redirect.go index 563c247782..7e6171ba8e 100644 --- a/test/e2e/annotations/redirect.go +++ b/test/e2e/annotations/redirect.go @@ -52,7 +52,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Redirect", func() { annotations := map[string]string{"nginx.ingress.kubernetes.io/permanent-redirect": redirectURL} - ing := framework.NewSingleIngress(host, redirectPath, host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, redirectPath, host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -64,7 +64,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Redirect", func() { By("sending request to redirected URL path") resp, body, errs := gorequest.New(). - Get(f.IngressController.HTTPURL+redirectPath). + Get(f.GetURL(framework.HTTP)+redirectPath). Set("Host", host). RedirectPolicy(noRedirectPolicyFunc). 
End() @@ -88,7 +88,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Redirect", func() { "nginx.ingress.kubernetes.io/permanent-redirect-code": strconv.Itoa(redirectCode), } - ing := framework.NewSingleIngress(host, redirectPath, host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, redirectPath, host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -100,7 +100,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Redirect", func() { By("sending request to redirected URL path") resp, body, errs := gorequest.New(). - Get(f.IngressController.HTTPURL+redirectPath). + Get(f.GetURL(framework.HTTP)+redirectPath). Set("Host", host). RedirectPolicy(noRedirectPolicyFunc). End() diff --git a/test/e2e/annotations/rewrite.go b/test/e2e/annotations/rewrite.go index a0ed9ed254..c5137a34c9 100644 --- a/test/e2e/annotations/rewrite.go +++ b/test/e2e/annotations/rewrite.go @@ -47,7 +47,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Rewrite", func() { "nginx.ingress.kubernetes.io/enable-rewrite-log": "true", } - ing := framework.NewSingleIngress(host, "/something", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/something", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -56,7 +56,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Rewrite", func() { }) resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL+"/something"). + Get(f.GetURL(framework.HTTP)+"/something"). Set("Host", host). 
End() @@ -73,7 +73,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Rewrite", func() { host := "rewrite.bar.com" By("creating a regular ingress definition") - ing := framework.NewSingleIngress("kube-lego", "/.well-known/acme/challenge", host, f.IngressController.Namespace, "http-svc", 80, &map[string]string{}) + ing := framework.NewSingleIngress("kube-lego", "/.well-known/acme/challenge", host, f.Namespace, "http-svc", 80, &map[string]string{}) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -83,7 +83,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Rewrite", func() { By("making a request to the non-rewritten location") resp, body, errs := gorequest.New(). - Get(f.IngressController.HTTPURL+"/.well-known/acme/challenge"). + Get(f.GetURL(framework.HTTP)+"/.well-known/acme/challenge"). Set("Host", host). End() expectBodyRequestURI := fmt.Sprintf("request_uri=http://%v:8080/.well-known/acme/challenge", host) @@ -95,7 +95,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Rewrite", func() { annotations := map[string]string{ "nginx.ingress.kubernetes.io/rewrite-target": "/new/backend", } - rewriteIng := framework.NewSingleIngress("rewrite-index", "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + rewriteIng := framework.NewSingleIngress("rewrite-index", "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(rewriteIng) @@ -106,7 +106,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Rewrite", func() { By("making a second request to the non-rewritten location") resp, body, errs = gorequest.New(). - Get(f.IngressController.HTTPURL+"/.well-known/acme/challenge"). + Get(f.GetURL(framework.HTTP)+"/.well-known/acme/challenge"). Set("Host", host). 
End() Expect(len(errs)).Should(Equal(0)) @@ -118,7 +118,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Rewrite", func() { host := "rewrite.bar.com" By("creating a regular ingress definition") - ing := framework.NewSingleIngress("foo", "/foo", host, f.IngressController.Namespace, "http-svc", 80, &map[string]string{}) + ing := framework.NewSingleIngress("foo", "/foo", host, f.Namespace, "http-svc", 80, &map[string]string{}) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -131,7 +131,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Rewrite", func() { "nginx.ingress.kubernetes.io/use-regex": "true", "nginx.ingress.kubernetes.io/rewrite-target": "/new/backend", } - ing = framework.NewSingleIngress("regex", "/foo.+", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing = framework.NewSingleIngress("regex", "/foo.+", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -141,7 +141,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Rewrite", func() { By("ensuring '/foo' matches '~* ^/foo'") resp, body, errs := gorequest.New(). - Get(f.IngressController.HTTPURL+"/foo"). + Get(f.GetURL(framework.HTTP)+"/foo"). Set("Host", host). End() expectBodyRequestURI := fmt.Sprintf("request_uri=http://%v:8080/foo", host) @@ -151,7 +151,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Rewrite", func() { By("ensuring '/foo/bar' matches '~* ^/foo.+'") resp, body, errs = gorequest.New(). - Get(f.IngressController.HTTPURL+"/foo/bar"). + Get(f.GetURL(framework.HTTP)+"/foo/bar"). Set("Host", host). 
End() expectBodyRequestURI = fmt.Sprintf("request_uri=http://%v:8080/new/backend", host) @@ -164,7 +164,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Rewrite", func() { host := "rewrite.bar.com" By("creating a regular ingress definition") - ing := framework.NewSingleIngress("foo", "/foo/bar/bar", host, f.IngressController.Namespace, "http-svc", 80, &map[string]string{}) + ing := framework.NewSingleIngress("foo", "/foo/bar/bar", host, f.Namespace, "http-svc", 80, &map[string]string{}) f.EnsureIngress(ing) By(`creating an ingress definition with the use-regex annotation`) @@ -172,7 +172,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Rewrite", func() { "nginx.ingress.kubernetes.io/use-regex": "true", "nginx.ingress.kubernetes.io/rewrite-target": "/new/backend", } - ing = framework.NewSingleIngress("regex", "/foo/bar/[a-z]{3}", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing = framework.NewSingleIngress("regex", "/foo/bar/[a-z]{3}", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -182,7 +182,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Rewrite", func() { By("check that '/foo/bar/bar' does not match the longest exact path") resp, body, errs := gorequest.New(). - Get(f.IngressController.HTTPURL+"/foo/bar/bar"). + Get(f.GetURL(framework.HTTP)+"/foo/bar/bar"). Set("Host", host). 
End() expectBodyRequestURI := fmt.Sprintf("request_uri=http://%v:8080/new/backend", host) @@ -199,7 +199,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Rewrite", func() { "nginx.ingress.kubernetes.io/use-regex": "true", "nginx.ingress.kubernetes.io/rewrite-target": "/new/backend/$1", } - ing := framework.NewSingleIngress("regex", "/foo/bar/(.+)", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress("regex", "/foo/bar/(.+)", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -209,7 +209,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Rewrite", func() { By("check that '/foo/bar/bar' redirects to cusotm rewrite") resp, body, errs := gorequest.New(). - Get(f.IngressController.HTTPURL+"/foo/bar/bar"). + Get(f.GetURL(framework.HTTP)+"/foo/bar/bar"). Set("Host", host). End() expectBodyRequestURI := fmt.Sprintf("request_uri=http://%v:8080/new/backend/bar", host) diff --git a/test/e2e/annotations/satisfy.go b/test/e2e/annotations/satisfy.go new file mode 100644 index 0000000000..faf80d2521 --- /dev/null +++ b/test/e2e/annotations/satisfy.go @@ -0,0 +1,87 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package annotations + +import ( + "fmt" + "net/http" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/parnurzeal/gorequest" + extensions "k8s.io/api/extensions/v1beta1" + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Annotations - SATISFY", func() { + f := framework.NewDefaultFramework("satisfy") + + BeforeEach(func() { + f.NewEchoDeployment() + }) + + AfterEach(func() { + }) + + It("should configure satisfy directive correctly", func() { + host := "satisfy" + annotationKey := "nginx.ingress.kubernetes.io/satisfy" + + annotations := map[string]string{ + "any": "any", + "all": "all", + "invalid": "all", + } + + results := map[string]string{ + "any": "satisfy any", + "all": "satisfy all", + "invalid": "satisfy all", + } + + initAnnotations := map[string]string{ + annotationKey: "all", + } + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &initAnnotations) + f.EnsureIngress(ing) + + for key, result := range results { + err := framework.UpdateIngress(f.KubeClientSet, f.Namespace, host, func(ingress *extensions.Ingress) error { + ingress.ObjectMeta.Annotations[annotationKey] = annotations[key] + return nil + }) + Expect(err).ToNot(HaveOccurred()) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring(result)) + }) + + resp, body, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Retry(10, 1*time.Second, http.StatusNotFound). + Set("Host", host). 
+ End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + Expect(body).Should(ContainSubstring(fmt.Sprintf("host=%v", host))) + } + }) +}) diff --git a/test/e2e/annotations/serversnippet.go b/test/e2e/annotations/serversnippet.go index 90bb36a258..6995d6c227 100644 --- a/test/e2e/annotations/serversnippet.go +++ b/test/e2e/annotations/serversnippet.go @@ -42,7 +42,7 @@ var _ = framework.IngressNginxDescribe("Annotations - ServerSnippet", func() { more_set_headers "Content-Type: $content_type";`, } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, diff --git a/test/e2e/annotations/snippet.go b/test/e2e/annotations/snippet.go index 1686fb37b3..13da543ae4 100644 --- a/test/e2e/annotations/snippet.go +++ b/test/e2e/annotations/snippet.go @@ -39,7 +39,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Configurationsnippet", fun more_set_headers "Request-Id: $req_id";`, } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, diff --git a/test/e2e/annotations/sslciphers.go b/test/e2e/annotations/sslciphers.go index 859444022b..5ea01f57ba 100644 --- a/test/e2e/annotations/sslciphers.go +++ b/test/e2e/annotations/sslciphers.go @@ -39,7 +39,7 @@ var _ = framework.IngressNginxDescribe("Annotations - SSL CIPHERS", func() { "nginx.ingress.kubernetes.io/ssl-ciphers": "ALL:!aNULL:!EXPORT56:RC4+RSA:+HIGH:+MEDIUM:+LOW:+SSLv2:+EXP", } - ing := framework.NewSingleIngress(host, "/something", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/something", host, f.Namespace, 
"http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, diff --git a/test/e2e/annotations/upstreamhashby.go b/test/e2e/annotations/upstreamhashby.go index b893c5d33f..59c2c765e2 100644 --- a/test/e2e/annotations/upstreamhashby.go +++ b/test/e2e/annotations/upstreamhashby.go @@ -33,7 +33,7 @@ import ( func startIngress(f *framework.Framework, annotations *map[string]string) map[string]bool { host := "upstream-hash-by.foo.com" - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, func(server string) bool { @@ -42,7 +42,7 @@ func startIngress(f *framework.Framework, annotations *map[string]string) map[st err := wait.PollImmediate(framework.Poll, framework.DefaultTimeout, func() (bool, error) { resp, _, _ := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). End() if resp.StatusCode == http.StatusOK { @@ -57,7 +57,7 @@ func startIngress(f *framework.Framework, annotations *map[string]string) map[st for i := 0; i < 100; i++ { _, body, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). 
End() diff --git a/test/e2e/annotations/upstreamvhost.go b/test/e2e/annotations/upstreamvhost.go index ae57894250..594eca9d1d 100644 --- a/test/e2e/annotations/upstreamvhost.go +++ b/test/e2e/annotations/upstreamvhost.go @@ -38,7 +38,7 @@ var _ = framework.IngressNginxDescribe("Annotations - Upstreamvhost", func() { "nginx.ingress.kubernetes.io/upstream-vhost": "upstreamvhost.bar.com", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(host, diff --git a/test/e2e/dbg/main.go b/test/e2e/dbg/main.go new file mode 100644 index 0000000000..55dfe3a77a --- /dev/null +++ b/test/e2e/dbg/main.go @@ -0,0 +1,102 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dbg + +import ( + "encoding/json" + "strings" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "k8s.io/ingress-nginx/test/e2e/framework" +) + +var _ = framework.IngressNginxDescribe("Debug Tool", func() { + f := framework.NewDefaultFramework("debug-tool") + host := "foo.com" + + BeforeEach(func() { + f.NewEchoDeploymentWithReplicas(1) + }) + + AfterEach(func() { + }) + + It("should list the backend servers", func() { + annotations := map[string]string{} + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxConfiguration(func(cfg string) bool { + return Expect(cfg).Should(ContainSubstring(host)) + }) + + cmd := "/dbg backends list" + output, err := f.ExecIngressPod(cmd) + Expect(err).Should(BeNil()) + + // Should be 2: the default and the echo deployment + numUpstreams := len(strings.Split(strings.Trim(string(output), "\n"), "\n")) + Expect(numUpstreams).Should(Equal(2)) + + }) + + It("should get information for a specific backend server", func() { + annotations := map[string]string{} + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) + f.EnsureIngress(ing) + + f.WaitForNginxConfiguration(func(cfg string) bool { + return Expect(cfg).Should(ContainSubstring(host)) + }) + + cmd := "/dbg backends list" + output, err := f.ExecIngressPod(cmd) + Expect(err).Should(BeNil()) + + backends := strings.Split(string(output), "\n") + Expect(len(backends)).Should(BeNumerically(">", 0)) + + getCmd := "/dbg backends get " + backends[0] + output, err = f.ExecIngressPod(getCmd) + + var f map[string]interface{} + unmarshalErr := json.Unmarshal([]byte(output), &f) + Expect(unmarshalErr).Should(BeNil()) + + // Check that the backend we've gotten has the same name as the one we requested + Expect(backends[0]).Should(Equal(f["name"].(string))) + }) + + It("should produce valid JSON for /dbg general", func() { + annotations := map[string]string{} + + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, 
"http-svc", 80, &annotations) + f.EnsureIngress(ing) + + cmd := "/dbg general" + output, err := f.ExecIngressPod(cmd) + Expect(err).Should(BeNil()) + + var f interface{} + unmarshalErr := json.Unmarshal([]byte(output), &f) + Expect(unmarshalErr).Should(BeNil()) + }) +}) diff --git a/test/e2e/defaultbackend/custom_default_backend.go b/test/e2e/defaultbackend/custom_default_backend.go index c2104b4154..02319f6121 100644 --- a/test/e2e/defaultbackend/custom_default_backend.go +++ b/test/e2e/defaultbackend/custom_default_backend.go @@ -37,12 +37,12 @@ var _ = framework.IngressNginxDescribe("Dynamic Certificate", func() { BeforeEach(func() { f.NewEchoDeploymentWithReplicas(1) - framework.UpdateDeployment(f.KubeClientSet, f.IngressController.Namespace, "nginx-ingress-controller", 1, + framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 1, func(deployment *appsv1beta1.Deployment) error { args := deployment.Spec.Template.Spec.Containers[0].Args - args = append(args, fmt.Sprintf("--default-backend-service=%s/%s", f.IngressController.Namespace, "http-svc")) + args = append(args, fmt.Sprintf("--default-backend-service=%s/%s", f.Namespace, "http-svc")) deployment.Spec.Template.Spec.Containers[0].Args = args - _, err := f.KubeClientSet.AppsV1beta1().Deployments(f.IngressController.Namespace).Update(deployment) + _, err := f.KubeClientSet.AppsV1beta1().Deployments(f.Namespace).Update(deployment) return err }) @@ -54,7 +54,7 @@ var _ = framework.IngressNginxDescribe("Dynamic Certificate", func() { }) It("uses custom default backend", func() { - resp, _, errs := gorequest.New().Get(f.IngressController.HTTPURL).End() + resp, _, errs := gorequest.New().Get(f.GetURL(framework.HTTP)).End() Expect(errs).Should(BeEmpty()) Expect(resp.StatusCode).Should(Equal(http.StatusOK)) }) diff --git a/test/e2e/defaultbackend/default_backend.go b/test/e2e/defaultbackend/default_backend.go index 3e4b6b87b2..04dec9f399 100644 --- 
a/test/e2e/defaultbackend/default_backend.go +++ b/test/e2e/defaultbackend/default_backend.go @@ -76,9 +76,9 @@ var _ = framework.IngressNginxDescribe("Default backend", func() { switch test.Scheme { case framework.HTTP: - cm = request.CustomMethod(test.Method, f.IngressController.HTTPURL) + cm = request.CustomMethod(test.Method, f.GetURL(framework.HTTP)) case framework.HTTPS: - cm = request.CustomMethod(test.Method, f.IngressController.HTTPSURL) + cm = request.CustomMethod(test.Method, f.GetURL(framework.HTTPS)) // the default backend uses a self generated certificate cm.Transport = &http.Transport{ TLSClientConfig: &tls.Config{ @@ -98,4 +98,35 @@ var _ = framework.IngressNginxDescribe("Default backend", func() { Expect(resp.StatusCode).Should(Equal(test.Status)) } }) + It("enables access logging for default backend", func() { + f.UpdateNginxConfigMapData("enable-access-log-for-default-backend", "true") + host := "foo" + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)+"/somethingOne"). + Set("Host", host). + End() + + Expect(len(errs)).Should(Equal(0)) + Expect(resp.StatusCode).Should(Equal(http.StatusNotFound)) + + logs, err := f.NginxLogs() + Expect(err).ToNot(HaveOccurred()) + Expect(logs).To(ContainSubstring("/somethingOne")) + }) + + It("disables access logging for default backend", func() { + f.UpdateNginxConfigMapData("enable-access-log-for-default-backend", "false") + host := "bar" + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)+"/somethingTwo"). + Set("Host", host). 
+ End() + + Expect(len(errs)).Should(Equal(0)) + Expect(resp.StatusCode).Should(Equal(http.StatusNotFound)) + + logs, err := f.NginxLogs() + Expect(err).ToNot(HaveOccurred()) + Expect(logs).ToNot(ContainSubstring("/somethingTwo")) + }) }) diff --git a/test/e2e/defaultbackend/ssl.go b/test/e2e/defaultbackend/ssl.go index 80f2be1850..685aa3a3eb 100644 --- a/test/e2e/defaultbackend/ssl.go +++ b/test/e2e/defaultbackend/ssl.go @@ -38,7 +38,7 @@ var _ = framework.IngressNginxDescribe("Default backend - SSL", func() { It("should return a self generated SSL certificate", func() { By("checking SSL Certificate using the NGINX IP address") resp, _, errs := gorequest.New(). - Post(f.IngressController.HTTPSURL). + Post(f.GetURL(framework.HTTPS)). TLSClientConfig(&tls.Config{ // the default backend uses a self generated certificate InsecureSkipVerify: true, @@ -53,7 +53,7 @@ var _ = framework.IngressNginxDescribe("Default backend - SSL", func() { By("checking SSL Certificate using the NGINX catch all server") resp, _, errs = gorequest.New(). - Post(f.IngressController.HTTPSURL). + Post(f.GetURL(framework.HTTPS)). TLSClientConfig(&tls.Config{ // the default backend uses a self generated certificate InsecureSkipVerify: true, diff --git a/test/e2e/defaultbackend/with_hosts.go b/test/e2e/defaultbackend/with_hosts.go new file mode 100644 index 0000000000..3d19c213b3 --- /dev/null +++ b/test/e2e/defaultbackend/with_hosts.go @@ -0,0 +1,82 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package defaultbackend + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/parnurzeal/gorequest" + extensions "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/ingress-nginx/test/e2e/framework" + "net/http" + "strings" +) + +var _ = framework.IngressNginxDescribe("Default backend with hosts", func() { + f := framework.NewDefaultFramework("default-backend-hosts") + host := "foo.com" + + BeforeEach(func() { + f.NewEchoDeploymentWithReplicas(1) + }) + + AfterEach(func() { + }) + + It("should apply the annotation to the default backend", func() { + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/proxy-buffer-size": "8k", + } + + ing := &extensions.Ingress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default-backend-annotations", + Namespace: f.Namespace, + Annotations: annotations, + }, + Spec: extensions.IngressSpec{ + Backend: &extensions.IngressBackend{ + ServiceName: "http-svc", + ServicePort: intstr.FromInt(8080), + }, + Rules: []extensions.IngressRule{ + { + Host: host, + }, + }, + }, + } + + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, + func(server string) bool { + return strings.Contains(server, "proxy_buffer_size 8k;") + }) + + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", "foo.com"). 
+ End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + }) + +}) diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index d65428bb14..215371f138 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -32,6 +32,7 @@ import ( // tests to run _ "k8s.io/ingress-nginx/test/e2e/annotations" + _ "k8s.io/ingress-nginx/test/e2e/dbg" _ "k8s.io/ingress-nginx/test/e2e/defaultbackend" _ "k8s.io/ingress-nginx/test/e2e/gracefulshutdown" _ "k8s.io/ingress-nginx/test/e2e/loadbalance" diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index a9cc453159..f7546229b9 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -19,18 +19,15 @@ package e2e import ( "testing" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/klog" - "k8s.io/ingress-nginx/test/e2e/framework" ) func init() { framework.RegisterParseFlags() - if "" == framework.TestContext.KubeConfig { - klog.Fatalf("environment variable %v must be set", clientcmd.RecommendedConfigPathEnvVar) - } + // if "" == framework.TestContext.KubeConfig { + // klog.Fatalf("environment variable %v must be set", clientcmd.RecommendedConfigPathEnvVar) + // } } func TestE2E(t *testing.T) { RunE2ETests(t) diff --git a/test/e2e/framework/deployment.go b/test/e2e/framework/deployment.go index 976b2a5ff1..2884da3fe2 100644 --- a/test/e2e/framework/deployment.go +++ b/test/e2e/framework/deployment.go @@ -33,7 +33,14 @@ func (f *Framework) NewEchoDeployment() { // NewEchoDeploymentWithReplicas creates a new deployment of the echoserver image in a particular namespace. Number of // replicas is configurable func (f *Framework) NewEchoDeploymentWithReplicas(replicas int32) { - f.NewDeployment("http-svc", "gcr.io/kubernetes-e2e-test-images/echoserver:2.2", 8080, replicas) + f.NewEchoDeploymentWithNameAndReplicas("http-svc", replicas) +} + +// NewEchoDeploymentWithNameAndReplicas creates a new deployment of the echoserver image in a particular namespace. 
Number of +// replicas is configurable and +// name is configurable +func (f *Framework) NewEchoDeploymentWithNameAndReplicas(name string, replicas int32) { + f.NewDeployment(name, "gcr.io/kubernetes-e2e-test-images/echoserver:2.2", 8080, replicas) } // NewSlowEchoDeployment creates a new deployment of the slow echo server image in a particular namespace. @@ -64,7 +71,7 @@ func (f *Framework) NewDeployment(name, image string, port int32, replicas int32 deployment := &extensions.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: name, - Namespace: f.IngressController.Namespace, + Namespace: f.Namespace, }, Spec: extensions.DeploymentSpec{ Replicas: NewInt32(replicas), @@ -108,7 +115,7 @@ func (f *Framework) NewDeployment(name, image string, port int32, replicas int32 service := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: name, - Namespace: f.IngressController.Namespace, + Namespace: f.Namespace, }, Spec: corev1.ServiceSpec{ Ports: []corev1.ServicePort{ @@ -128,6 +135,6 @@ func (f *Framework) NewDeployment(name, image string, port int32, replicas int32 s := f.EnsureService(service) Expect(s).NotTo(BeNil(), "expected a service but none returned") - err = WaitForEndpoints(f.KubeClientSet, DefaultTimeout, name, f.IngressController.Namespace, int(replicas)) + err = WaitForEndpoints(f.KubeClientSet, DefaultTimeout, name, f.Namespace, int(replicas)) Expect(err).NotTo(HaveOccurred(), "failed to wait for endpoints to become ready") } diff --git a/test/e2e/framework/exec.go b/test/e2e/framework/exec.go index 302a524aa4..1da7f586bc 100644 --- a/test/e2e/framework/exec.go +++ b/test/e2e/framework/exec.go @@ -30,7 +30,7 @@ import ( // ExecIngressPod executes a command inside the first container in ingress controller running pod func (f *Framework) ExecIngressPod(command string) (string, error) { - pod, err := getIngressNGINXPod(f.IngressController.Namespace, f.KubeClientSet) + pod, err := getIngressNGINXPod(f.Namespace, f.KubeClientSet) if err != nil { return "", err } 
diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go index 3d56ff4931..cbdb454efb 100644 --- a/test/e2e/framework/framework.go +++ b/test/e2e/framework/framework.go @@ -15,7 +15,6 @@ package framework import ( "fmt" - "os" "strings" "time" @@ -65,13 +64,6 @@ type Framework struct { // should abort, the AfterSuite hook should run all Cleanup actions. cleanupHandle CleanupActionHandle - IngressController *ingressController -} - -type ingressController struct { - HTTPURL string - HTTPSURL string - Namespace string } @@ -93,7 +85,11 @@ func (f *Framework) BeforeEach() { f.cleanupHandle = AddCleanupAction(f.AfterEach) By("Creating a kubernetes client") - kubeConfig, err := LoadConfig(TestContext.KubeConfig, TestContext.KubeContext) + kubeConfig, err := restclient.InClusterConfig() + if err != nil { + panic(err.Error()) + } + Expect(err).NotTo(HaveOccurred()) f.KubeConfig = kubeConfig @@ -104,25 +100,17 @@ func (f *Framework) BeforeEach() { ingressNamespace, err := CreateKubeNamespace(f.BaseName, f.KubeClientSet) Expect(err).NotTo(HaveOccurred()) - f.IngressController = &ingressController{ - Namespace: ingressNamespace, - } + f.Namespace = ingressNamespace By("Starting new ingress controller") - err = f.NewIngressController(f.IngressController.Namespace) + err = f.NewIngressController(f.Namespace) Expect(err).NotTo(HaveOccurred()) - err = WaitForPodsReady(f.KubeClientSet, DefaultTimeout, 1, f.IngressController.Namespace, metav1.ListOptions{ + err = WaitForPodsReady(f.KubeClientSet, DefaultTimeout, 1, f.Namespace, metav1.ListOptions{ LabelSelector: "app.kubernetes.io/name=ingress-nginx", }) Expect(err).NotTo(HaveOccurred()) - HTTPURL := f.GetNginxURL(HTTP) - f.IngressController.HTTPURL = HTTPURL - - HTTPSURL := f.GetNginxURL(HTTPS) - f.IngressController.HTTPSURL = HTTPSURL - // we wait for any change in the informers and SSL certificate generation time.Sleep(5 * time.Second) } @@ -132,7 +120,7 @@ func (f *Framework) AfterEach() { 
RemoveCleanupAction(f.cleanupHandle) By("Waiting for test namespace to no longer exist") - err := DeleteKubeNamespace(f.KubeClientSet, f.IngressController.Namespace) + err := DeleteKubeNamespace(f.KubeClientSet, f.Namespace) Expect(err).NotTo(HaveOccurred()) if CurrentGinkgoTestDescription().Failed { @@ -148,52 +136,34 @@ func IngressNginxDescribe(text string, body func()) bool { return Describe("[nginx-ingress] "+text, body) } -// GetNginxIP returns the IP address of the minikube cluster -// where the NGINX ingress controller is running +// GetNginxIP returns the ClusterIP address of the ingress-nginx service where NGINX is running func (f *Framework) GetNginxIP() string { - nodeIP := os.Getenv("NODE_IP") - Expect(nodeIP).NotTo(BeEmpty(), "env variable NODE_IP is empty") - return nodeIP -} - -// GetNginxPort returns the number of TCP port where NGINX is running -func (f *Framework) GetNginxPort(name string) (int, error) { s, err := f.KubeClientSet. CoreV1(). - Services(f.IngressController.Namespace). + Services(f.Namespace). 
Get("ingress-nginx", metav1.GetOptions{}) - if err != nil { - return -1, err - } - - for _, p := range s.Spec.Ports { - if p.NodePort != 0 && p.Name == name { - return int(p.NodePort), nil - } - } - - return -1, err + Expect(err).NotTo(HaveOccurred(), "unexpected error obtaining NGINX IP address") + return s.Spec.ClusterIP } -// GetNginxURL returns the URL should be used to make a request to NGINX -func (f *Framework) GetNginxURL(scheme RequestScheme) string { +// GetURL returns the URL that should be used to make a request to NGINX +func (f *Framework) GetURL(scheme RequestScheme) string { ip := f.GetNginxIP() - port, err := f.GetNginxPort(fmt.Sprintf("%v", scheme)) - Expect(err).NotTo(HaveOccurred(), "unexpected error obtaning NGINX Port") - - return fmt.Sprintf("%v://%v:%v", scheme, ip, port) + return fmt.Sprintf("%v://%v", scheme, ip) } // WaitForNginxServer waits until the nginx configuration contains a particular server section func (f *Framework) WaitForNginxServer(name string, matcher func(cfg string) bool) { - err := wait.Poll(Poll, time.Minute*5, f.matchNginxConditions(name, matcher)) + err := wait.Poll(Poll, DefaultTimeout, f.matchNginxConditions(name, matcher)) Expect(err).NotTo(HaveOccurred(), "unexpected error waiting for nginx server condition/s") + time.Sleep(5 * time.Second) } // WaitForNginxConfiguration waits until the nginx configuration contains a particular configuration func (f *Framework) WaitForNginxConfiguration(matcher func(cfg string) bool) { - err := wait.Poll(Poll, time.Minute*5, f.matchNginxConditions("", matcher)) + err := wait.Poll(Poll, DefaultTimeout, f.matchNginxConditions("", matcher)) Expect(err).NotTo(HaveOccurred(), "unexpected error waiting for nginx server condition/s") + time.Sleep(5 * time.Second) } func nginxLogs(client kubernetes.Interface, namespace string) (string, error) { @@ -211,14 +181,14 @@ func nginxLogs(client kubernetes.Interface, namespace string) (string, error) { // NginxLogs returns the logs of the nginx ingress 
controller pod running func (f *Framework) NginxLogs() (string, error) { - return nginxLogs(f.KubeClientSet, f.IngressController.Namespace) + return nginxLogs(f.KubeClientSet, f.Namespace) } func (f *Framework) matchNginxConditions(name string, matcher func(cfg string) bool) wait.ConditionFunc { return func() (bool, error) { - pod, err := getIngressNGINXPod(f.IngressController.Namespace, f.KubeClientSet) + pod, err := getIngressNGINXPod(f.Namespace, f.KubeClientSet) if err != nil { - return false, err + return false, nil } var cmd string @@ -230,7 +200,7 @@ func (f *Framework) matchNginxConditions(name string, matcher func(cfg string) b o, err := f.ExecCommand(pod, cmd) if err != nil { - return false, err + return false, nil } var match bool @@ -264,7 +234,7 @@ func (f *Framework) getNginxConfigMap() (*v1.ConfigMap, error) { config, err := f.KubeClientSet. CoreV1(). - ConfigMaps(f.IngressController.Namespace). + ConfigMaps(f.Namespace). Get("nginx-configuration", metav1.GetOptions{}) if err != nil { return nil, err @@ -298,7 +268,7 @@ func (f *Framework) SetNginxConfigMapData(cmData map[string]string) { _, err = f.KubeClientSet. CoreV1(). - ConfigMaps(f.IngressController.Namespace). + ConfigMaps(f.Namespace). Update(config) Expect(err).NotTo(HaveOccurred()) @@ -318,14 +288,14 @@ func (f *Framework) UpdateNginxConfigMapData(key string, value string) { // DeleteNGINXPod deletes the currently running pod. It waits for the replacement pod to be up. // Grace period to wait for pod shutdown is in seconds. 
func (f *Framework) DeleteNGINXPod(grace int64) { - ns := f.IngressController.Namespace + ns := f.Namespace pod, err := getIngressNGINXPod(ns, f.KubeClientSet) Expect(err).NotTo(HaveOccurred(), "expected ingress nginx pod to be running") err = f.KubeClientSet.CoreV1().Pods(ns).Delete(pod.GetName(), metav1.NewDeleteOptions(grace)) Expect(err).NotTo(HaveOccurred(), "unexpected error deleting ingress nginx pod") - err = wait.Poll(Poll, time.Minute*5, func() (bool, error) { + err = wait.Poll(Poll, DefaultTimeout, func() (bool, error) { pod, err := getIngressNGINXPod(ns, f.KubeClientSet) if err != nil || pod == nil { return false, nil @@ -432,6 +402,36 @@ func newSingleIngressWithRules(name, path, host, ns, service string, port int, a return newSingleIngress(name, ns, annotations, spec) } +// NewSingleIngressWithBackendAndRules creates an ingress with both a default backend and a rule +func NewSingleIngressWithBackendAndRules(name, path, host, ns, defaultService string, defaultPort int, service string, port int, annotations *map[string]string) *extensions.Ingress { + spec := extensions.IngressSpec{ + Backend: &extensions.IngressBackend{ + ServiceName: defaultService, + ServicePort: intstr.FromInt(defaultPort), + }, + Rules: []extensions.IngressRule{ + { + Host: host, + IngressRuleValue: extensions.IngressRuleValue{ + HTTP: &extensions.HTTPIngressRuleValue{ + Paths: []extensions.HTTPIngressPath{ + { + Path: path, + Backend: extensions.IngressBackend{ + ServiceName: service, + ServicePort: intstr.FromInt(port), + }, + }, + }, + }, + }, + }, + }, + } + + return newSingleIngress(name, ns, annotations, spec) +} + // NewSingleCatchAllIngress creates a simple ingress with a catch-all backend func NewSingleCatchAllIngress(name, ns, service string, port int, annotations *map[string]string) *extensions.Ingress { spec := extensions.IngressSpec{ diff --git a/test/e2e/framework/grpc_fortune_teller.go b/test/e2e/framework/grpc_fortune_teller.go index e6b28ad06a..2293ddf5f3 100644 --- 
a/test/e2e/framework/grpc_fortune_teller.go +++ b/test/e2e/framework/grpc_fortune_teller.go @@ -38,7 +38,7 @@ func (f *Framework) NewNewGRPCFortuneTellerDeploymentWithReplicas(replicas int32 deployment := &extensions.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: "fortune-teller", - Namespace: f.IngressController.Namespace, + Namespace: f.Namespace, }, Spec: extensions.DeploymentSpec{ Replicas: NewInt32(replicas), @@ -77,7 +77,7 @@ func (f *Framework) NewNewGRPCFortuneTellerDeploymentWithReplicas(replicas int32 Expect(err).NotTo(HaveOccurred()) Expect(d).NotTo(BeNil(), "expected a fortune-teller deployment") - err = WaitForPodsReady(f.KubeClientSet, DefaultTimeout, int(replicas), f.IngressController.Namespace, metav1.ListOptions{ + err = WaitForPodsReady(f.KubeClientSet, DefaultTimeout, int(replicas), f.Namespace, metav1.ListOptions{ LabelSelector: fields.SelectorFromSet(fields.Set(d.Spec.Template.ObjectMeta.Labels)).String(), }) Expect(err).NotTo(HaveOccurred(), "failed to wait for to become ready") @@ -85,7 +85,7 @@ func (f *Framework) NewNewGRPCFortuneTellerDeploymentWithReplicas(replicas int32 service := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "fortune-teller", - Namespace: f.IngressController.Namespace, + Namespace: f.Namespace, }, Spec: corev1.ServiceSpec{ Ports: []corev1.ServicePort{ diff --git a/test/e2e/framework/influxdb.go b/test/e2e/framework/influxdb.go index d34df549b8..dfb441a66e 100644 --- a/test/e2e/framework/influxdb.go +++ b/test/e2e/framework/influxdb.go @@ -61,7 +61,7 @@ func (f *Framework) NewInfluxDBDeployment() { configuration := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: "influxdb-config", - Namespace: f.IngressController.Namespace, + Namespace: f.Namespace, }, Data: map[string]string{ "influxd.conf": influxConfig, @@ -76,7 +76,7 @@ func (f *Framework) NewInfluxDBDeployment() { deployment := &extensions.Deployment{ ObjectMeta: metav1.ObjectMeta{ Name: "influxdb-svc", - Namespace: f.IngressController.Namespace, + 
Namespace: f.Namespace, }, Spec: extensions.DeploymentSpec{ Replicas: NewInt32(1), @@ -140,7 +140,7 @@ func (f *Framework) NewInfluxDBDeployment() { Expect(d).NotTo(BeNil(), "unexpected error creating deployement for influxdb") - err = WaitForPodsReady(f.KubeClientSet, DefaultTimeout, 1, f.IngressController.Namespace, metav1.ListOptions{ + err = WaitForPodsReady(f.KubeClientSet, DefaultTimeout, 1, f.Namespace, metav1.ListOptions{ LabelSelector: fields.SelectorFromSet(fields.Set(d.Spec.Template.ObjectMeta.Labels)).String(), }) Expect(err).NotTo(HaveOccurred(), "failed to wait for influxdb to become ready") diff --git a/test/e2e/framework/k8s.go b/test/e2e/framework/k8s.go index 2833e86940..de0acd06a0 100644 --- a/test/e2e/framework/k8s.go +++ b/test/e2e/framework/k8s.go @@ -124,7 +124,7 @@ func WaitForPodsReady(kubeClientSet kubernetes.Interface, timeout time.Duration, return wait.Poll(2*time.Second, timeout, func() (bool, error) { pl, err := kubeClientSet.CoreV1().Pods(namespace).List(opts) if err != nil { - return false, err + return false, nil } r := 0 @@ -144,14 +144,17 @@ func WaitForPodsReady(kubeClientSet kubernetes.Interface, timeout time.Duration, // WaitForEndpoints waits for a given amount of time until an endpoint contains. 
func WaitForEndpoints(kubeClientSet kubernetes.Interface, timeout time.Duration, name, ns string, expectedEndpoints int) error { + if expectedEndpoints == 0 { + return nil + } return wait.Poll(2*time.Second, timeout, func() (bool, error) { endpoint, err := kubeClientSet.CoreV1().Endpoints(ns).Get(name, metav1.GetOptions{}) if k8sErrors.IsNotFound(err) { - return false, err + return false, nil } Expect(err).NotTo(HaveOccurred()) if len(endpoint.Subsets) == 0 || len(endpoint.Subsets[0].Addresses) == 0 { - return false, err + return false, nil } r := 0 @@ -188,7 +191,7 @@ func getIngressNGINXPod(ns string, kubeClientSet kubernetes.Interface) (*core.Po LabelSelector: "app.kubernetes.io/name=ingress-nginx", }) if err != nil { - return nil, err + return nil, nil } if len(l.Items) == 0 { diff --git a/test/e2e/framework/ssl.go b/test/e2e/framework/ssl.go index cdfad461b0..35fcb755eb 100644 --- a/test/e2e/framework/ssl.go +++ b/test/e2e/framework/ssl.go @@ -34,7 +34,7 @@ import ( . "github.com/onsi/gomega" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" @@ -338,19 +338,24 @@ func tlsConfig(serverName string, pemCA []byte) (*tls.Config, error) { // given URL using the given TLS configuration and returns whether the TLS // handshake completed successfully. 
func matchTLSServerName(url string, tlsConfig *tls.Config) wait.ConditionFunc { - return func() (ready bool, err error) { + return func() (bool, error) { u, err := net_url.Parse(url) if err != nil { - return + return false, err } - conn, err := tls.Dial("tcp", u.Host, tlsConfig) + port := u.Port() + if port == "" { + port = "443" + } + + conn, err := tls.Dial("tcp", fmt.Sprintf("%v:%v", u.Host, port), tlsConfig) if err != nil { + Logf("Unexpected TLS error: %v", err) return false, nil } conn.Close() - ready = true - return + return true, nil } } diff --git a/test/e2e/framework/test_context.go b/test/e2e/framework/test_context.go index 17ba4ef7e3..d91540cfc6 100644 --- a/test/e2e/framework/test_context.go +++ b/test/e2e/framework/test_context.go @@ -18,17 +18,14 @@ package framework import ( "flag" - "os" "github.com/onsi/ginkgo/config" - - "k8s.io/client-go/tools/clientcmd" ) // TestContextType describes the client context to use in communications with the Kubernetes API. type TestContextType struct { - KubeHost string - KubeConfig string + KubeHost string + //KubeConfig string KubeContext string } @@ -51,7 +48,7 @@ func RegisterCommonFlags() { config.DefaultReporterConfig.SlowSpecThreshold = 20 flag.StringVar(&TestContext.KubeHost, "kubernetes-host", "http://127.0.0.1:8080", "The kubernetes host, or apiserver, to connect to") - flag.StringVar(&TestContext.KubeConfig, "kubernetes-config", os.Getenv(clientcmd.RecommendedConfigPathEnvVar), "Path to config containing embedded authinfo for kubernetes. Default value is from environment variable "+clientcmd.RecommendedConfigPathEnvVar) + //flag.StringVar(&TestContext.KubeConfig, "kubernetes-config", os.Getenv(clientcmd.RecommendedConfigPathEnvVar), "Path to config containing embedded authinfo for kubernetes. Default value is from environment variable "+clientcmd.RecommendedConfigPathEnvVar) flag.StringVar(&TestContext.KubeContext, "kubernetes-context", "", "config context to use for kubernetes. 
If unset, will use value from 'current-context'") } diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 986729f555..2b7025d964 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -23,13 +23,12 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/clientcmd/api" "k8s.io/ingress-nginx/internal/file" @@ -37,10 +36,10 @@ import ( const ( // Poll how often to poll for conditions - Poll = 2 * time.Second + Poll = 3 * time.Second // DefaultTimeout time to wait for operations to complete - DefaultTimeout = 5 * time.Minute + DefaultTimeout = 3 * time.Minute ) func nowStamp() string { @@ -87,15 +86,6 @@ func RestclientConfig(config, context string) (*api.Config, error) { return c, nil } -// LoadConfig deserializes the contents of a kubeconfig file into a REST configuration. -func LoadConfig(config, context string) (*rest.Config, error) { - c, err := RestclientConfig(config, context) - if err != nil { - return nil, err - } - return clientcmd.NewDefaultClientConfig(*c, &clientcmd.ConfigOverrides{}).ClientConfig() -} - // RunID unique identifier of the e2e run var RunID = uuid.NewUUID() @@ -109,8 +99,9 @@ func CreateKubeNamespace(baseName string, c kubernetes.Interface) (string, error } // Be robust about making the namespace creation call. 
var got *v1.Namespace - err := wait.PollImmediate(Poll, DefaultTimeout, func() (bool, error) { - var err error + var err error + + err = wait.PollImmediate(Poll, DefaultTimeout, func() (bool, error) { got, err = c.CoreV1().Namespaces().Create(ns) if err != nil { Logf("Unexpected error while creating namespace: %v", err) @@ -200,7 +191,7 @@ func secretInNamespace(c kubernetes.Interface, namespace, name string) wait.Cond return func() (bool, error) { s, err := c.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { - return false, err + return false, nil } if err != nil { return false, err @@ -268,7 +259,7 @@ func ingressInNamespace(c kubernetes.Interface, namespace, name string) wait.Con return func() (bool, error) { ing, err := c.ExtensionsV1beta1().Ingresses(namespace).Get(name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { - return false, err + return false, nil } if err != nil { return false, err @@ -285,7 +276,7 @@ func podRunning(c kubernetes.Interface, podName, namespace string) wait.Conditio return func() (bool, error) { pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{}) if err != nil { - return false, err + return false, nil } switch pod.Status.Phase { case v1.PodRunning: diff --git a/test/e2e/gracefulshutdown/slow_requests.go b/test/e2e/gracefulshutdown/slow_requests.go index b98a77ee5a..793881b96f 100644 --- a/test/e2e/gracefulshutdown/slow_requests.go +++ b/test/e2e/gracefulshutdown/slow_requests.go @@ -38,7 +38,7 @@ var _ = framework.IngressNginxDescribe("Graceful Shutdown - Slow Requests", func It("should let slow requests finish before shutting down", func() { host := "graceful-shutdown" - f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "slowecho", 8080, nil)) + f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.Namespace, "slowecho", 8080, nil)) f.WaitForNginxConfiguration( func(conf string) bool { return strings.Contains(conf, 
"worker_shutdown_timeout") @@ -49,7 +49,7 @@ var _ = framework.IngressNginxDescribe("Graceful Shutdown - Slow Requests", func defer func() { done <- true }() defer GinkgoRecover() resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL+"/sleep/30"). + Get(f.GetURL(framework.HTTP)+"/sleep/30"). Set("Host", host). End() Expect(errs).To(BeNil()) diff --git a/test/e2e/loadbalance/round_robin.go b/test/e2e/loadbalance/round_robin.go index aee6579c54..e64abbc13c 100644 --- a/test/e2e/loadbalance/round_robin.go +++ b/test/e2e/loadbalance/round_robin.go @@ -43,7 +43,7 @@ var _ = framework.IngressNginxDescribe("Load Balance - Round Robin", func() { It("should evenly distribute requests with round-robin (default algorithm)", func() { host := "load-balance.com" - f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, nil)) + f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, nil)) f.WaitForNginxServer(host, func(server string) bool { return strings.Contains(server, "server_name load-balance.com") @@ -54,7 +54,7 @@ var _ = framework.IngressNginxDescribe("Load Balance - Round Robin", func() { for i := 0; i < 600; i++ { _, body, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). 
End() Expect(errs).Should(BeEmpty()) diff --git a/test/e2e/lua/dynamic_certificates.go b/test/e2e/lua/dynamic_certificates.go index 26b9a3ac6a..88c9016398 100644 --- a/test/e2e/lua/dynamic_certificates.go +++ b/test/e2e/lua/dynamic_certificates.go @@ -36,13 +36,13 @@ var _ = framework.IngressNginxDescribe("Dynamic Certificate", func() { host := "foo.com" BeforeEach(func() { - err := framework.UpdateDeployment(f.KubeClientSet, f.IngressController.Namespace, "nginx-ingress-controller", 1, + err := framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 1, func(deployment *appsv1beta1.Deployment) error { args := deployment.Spec.Template.Spec.Containers[0].Args args = append(args, "--enable-dynamic-certificates") args = append(args, "--enable-ssl-chain-completion=false") deployment.Spec.Template.Spec.Containers[0].Args = args - _, err := f.KubeClientSet.AppsV1beta1().Deployments(f.IngressController.Namespace).Update(deployment) + _, err := f.KubeClientSet.AppsV1beta1().Deployments(f.Namespace).Update(deployment) return err }) @@ -57,9 +57,9 @@ var _ = framework.IngressNginxDescribe("Dynamic Certificate", func() { }) It("picks up the certificate when we add TLS spec to existing ingress", func() { - ensureIngress(f, host) + ensureIngress(f, host, "http-svc") - ing, err := f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.IngressController.Namespace).Get(host, metav1.GetOptions{}) + ing, err := f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.Namespace).Get(host, metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) ing.Spec.TLS = []extensions.IngressTLS{ { @@ -72,20 +72,20 @@ var _ = framework.IngressNginxDescribe("Dynamic Certificate", func() { ing.Spec.TLS[0].SecretName, ing.Namespace) Expect(err).ToNot(HaveOccurred()) - _, err = f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.IngressController.Namespace).Update(ing) + _, err = f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.Namespace).Update(ing) Expect(err).ToNot(HaveOccurred()) 
time.Sleep(waitForLuaSync) - ensureHTTPSRequest(f.IngressController.HTTPSURL, host, host) + ensureHTTPSRequest(f.GetURL(framework.HTTPS), host, host) }) It("picks up the previously missing secret for a given ingress without reloading", func() { - ing := framework.NewSingleIngressWithTLS(host, "/", host, []string{host}, f.IngressController.Namespace, "http-svc", 80, nil) + ing := framework.NewSingleIngressWithTLS(host, "/", host, []string{host}, f.Namespace, "http-svc", 80, nil) f.EnsureIngress(ing) time.Sleep(waitForLuaSync) - ensureHTTPSRequest(fmt.Sprintf("%s?id=dummy_log_splitter_foo_bar", f.IngressController.HTTPSURL), host, "ingress.local") + ensureHTTPSRequest(fmt.Sprintf("%s?id=dummy_log_splitter_foo_bar", f.GetURL(framework.HTTPS)), host, "ingress.local") _, err := framework.CreateIngressTLSSecret(f.KubeClientSet, ing.Spec.TLS[0].Hosts, @@ -105,7 +105,7 @@ var _ = framework.IngressNginxDescribe("Dynamic Certificate", func() { time.Sleep(waitForLuaSync) By("serving the configured certificate on HTTPS endpoint") - ensureHTTPSRequest(f.IngressController.HTTPSURL, host, host) + ensureHTTPSRequest(f.GetURL(framework.HTTPS), host, host) log, err := f.NginxLogs() Expect(err).ToNot(HaveOccurred()) @@ -120,11 +120,11 @@ var _ = framework.IngressNginxDescribe("Dynamic Certificate", func() { Context("given an ingress with TLS correctly configured", func() { BeforeEach(func() { - ing := f.EnsureIngress(framework.NewSingleIngressWithTLS(host, "/", host, []string{host}, f.IngressController.Namespace, "http-svc", 80, nil)) + ing := f.EnsureIngress(framework.NewSingleIngressWithTLS(host, "/", host, []string{host}, f.Namespace, "http-svc", 80, nil)) time.Sleep(waitForLuaSync) - ensureHTTPSRequest(f.IngressController.HTTPSURL, host, "ingress.local") + ensureHTTPSRequest(f.GetURL(framework.HTTPS), host, "ingress.local") _, err := framework.CreateIngressTLSSecret(f.KubeClientSet, ing.Spec.TLS[0].Hosts, @@ -145,14 +145,14 @@ var _ = framework.IngressNginxDescribe("Dynamic 
Certificate", func() { time.Sleep(waitForLuaSync) By("serving the configured certificate on HTTPS endpoint") - ensureHTTPSRequest(f.IngressController.HTTPSURL, host, host) + ensureHTTPSRequest(f.GetURL(framework.HTTPS), host, host) }) It("picks up the updated certificate without reloading", func() { - ing, err := f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.IngressController.Namespace).Get(host, metav1.GetOptions{}) + ing, err := f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.Namespace).Get(host, metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) - ensureHTTPSRequest(fmt.Sprintf("%s?id=dummy_log_splitter_foo_bar", f.IngressController.HTTPSURL), host, host) + ensureHTTPSRequest(fmt.Sprintf("%s?id=dummy_log_splitter_foo_bar", f.GetURL(framework.HTTPS)), host, host) _, err = framework.CreateIngressTLSSecret(f.KubeClientSet, ing.Spec.TLS[0].Hosts, @@ -173,7 +173,7 @@ var _ = framework.IngressNginxDescribe("Dynamic Certificate", func() { time.Sleep(waitForLuaSync) By("serving the configured certificate on HTTPS endpoint") - ensureHTTPSRequest(f.IngressController.HTTPSURL, host, host) + ensureHTTPSRequest(f.GetURL(framework.HTTPS), host, host) log, err := f.NginxLogs() Expect(err).ToNot(HaveOccurred()) @@ -187,9 +187,9 @@ var _ = framework.IngressNginxDescribe("Dynamic Certificate", func() { }) It("falls back to using default certificate when secret gets deleted without reloading", func() { - ing, err := f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.IngressController.Namespace).Get(host, metav1.GetOptions{}) + ing, err := f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.Namespace).Get(host, metav1.GetOptions{}) - ensureHTTPSRequest(fmt.Sprintf("%s?id=dummy_log_splitter_foo_bar", f.IngressController.HTTPSURL), host, host) + ensureHTTPSRequest(fmt.Sprintf("%s?id=dummy_log_splitter_foo_bar", f.GetURL(framework.HTTPS)), host, host) f.KubeClientSet.CoreV1().Secrets(ing.Namespace).Delete(ing.Spec.TLS[0].SecretName, nil) Expect(err).ToNot(HaveOccurred()) @@ -207,7 
+207,7 @@ var _ = framework.IngressNginxDescribe("Dynamic Certificate", func() { time.Sleep(waitForLuaSync) By("serving the default certificate on HTTPS endpoint") - ensureHTTPSRequest(f.IngressController.HTTPSURL, host, "ingress.local") + ensureHTTPSRequest(f.GetURL(framework.HTTPS), host, "ingress.local") log, err := f.NginxLogs() Expect(err).ToNot(HaveOccurred()) @@ -222,22 +222,22 @@ var _ = framework.IngressNginxDescribe("Dynamic Certificate", func() { It("picks up a non-certificate only change", func() { newHost := "foo2.com" - ing, err := f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.IngressController.Namespace).Get(host, metav1.GetOptions{}) + ing, err := f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.Namespace).Get(host, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) ing.Spec.Rules[0].Host = newHost - _, err = f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.IngressController.Namespace).Update(ing) + _, err = f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.Namespace).Update(ing) Expect(err).ToNot(HaveOccurred()) time.Sleep(waitForLuaSync) By("serving the configured certificate on HTTPS endpoint") - ensureHTTPSRequest(f.IngressController.HTTPSURL, newHost, "ingress.local") + ensureHTTPSRequest(f.GetURL(framework.HTTPS), newHost, "ingress.local") }) It("removes HTTPS configuration when we delete TLS spec", func() { - ing, err := f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.IngressController.Namespace).Get(host, metav1.GetOptions{}) + ing, err := f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.Namespace).Get(host, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) ing.Spec.TLS = []extensions.IngressTLS{} - _, err = f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.IngressController.Namespace).Update(ing) + _, err = f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.Namespace).Update(ing) Expect(err).ToNot(HaveOccurred()) time.Sleep(waitForLuaSync) diff --git a/test/e2e/lua/dynamic_configuration.go b/test/e2e/lua/dynamic_configuration.go index 
df6dc704d2..a5eb34d53f 100644 --- a/test/e2e/lua/dynamic_configuration.go +++ b/test/e2e/lua/dynamic_configuration.go @@ -31,6 +31,7 @@ import ( extensions "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/ingress-nginx/internal/nginx" "k8s.io/ingress-nginx/test/e2e/framework" ) @@ -48,7 +49,7 @@ var _ = framework.IngressNginxDescribe("Dynamic Configuration", func() { BeforeEach(func() { f.NewEchoDeploymentWithReplicas(1) - ensureIngress(f, "foo.com") + ensureIngress(f, "foo.com", "http-svc") }) It("configures balancer Lua middleware correctly", func() { @@ -78,7 +79,7 @@ var _ = framework.IngressNginxDescribe("Dynamic Configuration", func() { }) replicas := 2 - err := framework.UpdateDeployment(f.KubeClientSet, f.IngressController.Namespace, "http-svc", replicas, nil) + err := framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "http-svc", replicas, nil) Expect(err).NotTo(HaveOccurred()) time.Sleep(waitForLuaSync) @@ -92,6 +93,60 @@ var _ = framework.IngressNginxDescribe("Dynamic Configuration", func() { Expect(nginxConfig).Should(Equal(newNginxConfig)) }) + It("handles endpoints only changes (down scaling of replicas)", func() { + var nginxConfig string + f.WaitForNginxConfiguration(func(cfg string) bool { + nginxConfig = cfg + return true + }) + + replicas := 2 + err := framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "http-svc", replicas, nil) + Expect(err).NotTo(HaveOccurred()) + time.Sleep(waitForLuaSync * 2) + + ensureRequest(f, "foo.com") + + var newNginxConfig string + f.WaitForNginxConfiguration(func(cfg string) bool { + newNginxConfig = cfg + return true + }) + Expect(nginxConfig).Should(Equal(newNginxConfig)) + + err = framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "http-svc", 0, nil) + + Expect(err).NotTo(HaveOccurred()) + time.Sleep(waitForLuaSync * 2) + + ensureRequestWithStatus(f, "foo.com", 503) + }) + + It("handles endpoints only changes consistently (down scaling of replicas vs. 
empty service)", func() { + deploymentName := "scalingecho" + f.NewEchoDeploymentWithNameAndReplicas(deploymentName, 0) + createIngress(f, "scaling.foo.com", deploymentName) + originalResponseCode := runRequest(f, "scaling.foo.com") + + replicas := 2 + err := framework.UpdateDeployment(f.KubeClientSet, f.Namespace, deploymentName, replicas, nil) + Expect(err).NotTo(HaveOccurred()) + time.Sleep(waitForLuaSync * 2) + + expectedSuccessResponseCode := runRequest(f, "scaling.foo.com") + + replicas = 0 + err = framework.UpdateDeployment(f.KubeClientSet, f.Namespace, deploymentName, replicas, nil) + Expect(err).NotTo(HaveOccurred()) + time.Sleep(waitForLuaSync * 2) + + expectedFailureResponseCode := runRequest(f, "scaling.foo.com") + + Expect(originalResponseCode).To(Equal(503), "Expected empty service to return 503 response.") + Expect(expectedFailureResponseCode).To(Equal(503), "Expected downscaled replicaset to return 503 response.") + Expect(expectedSuccessResponseCode).To(Equal(200), "Expected intermediate scaled replicaset to return a 200 response.") + }) + It("handles an annotation change", func() { var nginxConfig string f.WaitForNginxConfiguration(func(cfg string) bool { @@ -99,11 +154,11 @@ var _ = framework.IngressNginxDescribe("Dynamic Configuration", func() { return true }) - ingress, err := f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.IngressController.Namespace).Get("foo.com", metav1.GetOptions{}) + ingress, err := f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.Namespace).Get("foo.com", metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) ingress.ObjectMeta.Annotations["nginx.ingress.kubernetes.io/load-balance"] = "round_robin" - _, err = f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.IngressController.Namespace).Update(ingress) + _, err = f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.Namespace).Update(ingress) Expect(err).ToNot(HaveOccurred()) time.Sleep(waitForLuaSync) @@ -126,7 +181,7 @@ var _ = framework.IngressNginxDescribe("Dynamic 
Configuration", func() { return true }) - ingress, err := f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.IngressController.Namespace).Get("foo.com", metav1.GetOptions{}) + ingress, err := f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.Namespace).Get("foo.com", metav1.GetOptions{}) Expect(err).ToNot(HaveOccurred()) ingress.Spec.TLS = []extensions.IngressTLS{ { @@ -139,7 +194,7 @@ var _ = framework.IngressNginxDescribe("Dynamic Configuration", func() { ingress.Spec.TLS[0].SecretName, ingress.Namespace) Expect(err).ToNot(HaveOccurred()) - _, err = f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.IngressController.Namespace).Update(ingress) + _, err = f.KubeClientSet.ExtensionsV1beta1().Ingresses(f.Namespace).Update(ingress) Expect(err).ToNot(HaveOccurred()) var newNginxConfig string @@ -151,22 +206,33 @@ var _ = framework.IngressNginxDescribe("Dynamic Configuration", func() { }) It("sets controllerPodsCount in Lua general configuration", func() { - output, err := f.ExecIngressPod("curl --fail --silent http://127.0.0.1:18080/configuration/general") + // https://github.com/curl/curl/issues/936 + curlCmd := fmt.Sprintf("curl --fail --silent --unix-socket %v http://localhost/configuration/general", nginx.StatusSocket) + + output, err := f.ExecIngressPod(curlCmd) Expect(err).ToNot(HaveOccurred()) Expect(output).Should(Equal(`{"controllerPodsCount":1}`)) - err = framework.UpdateDeployment(f.KubeClientSet, f.IngressController.Namespace, "nginx-ingress-controller", 3, nil) + err = framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 3, nil) Expect(err).ToNot(HaveOccurred()) time.Sleep(waitForLuaSync) - output, err = f.ExecIngressPod("curl --fail --silent http://127.0.0.1:18080/configuration/general") + output, err = f.ExecIngressPod(curlCmd) Expect(err).ToNot(HaveOccurred()) Expect(output).Should(Equal(`{"controllerPodsCount":3}`)) }) }) -func ensureIngress(f *framework.Framework, host string) *extensions.Ingress { - ing := 
f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, +func ensureIngress(f *framework.Framework, host string, deploymentName string) *extensions.Ingress { + ing := createIngress(f, host, deploymentName) + time.Sleep(waitForLuaSync) + ensureRequest(f, host) + + return ing +} + +func createIngress(f *framework.Framework, host string, deploymentName string) *extensions.Ingress { + ing := f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.Namespace, deploymentName, 80, &map[string]string{"nginx.ingress.kubernetes.io/load-balance": "ewma"})) f.WaitForNginxServer(host, @@ -174,21 +240,37 @@ func ensureIngress(f *framework.Framework, host string) *extensions.Ingress { return strings.Contains(server, fmt.Sprintf("server_name %s ;", host)) && strings.Contains(server, "proxy_pass http://upstream_balancer;") }) - time.Sleep(waitForLuaSync) - ensureRequest(f, host) return ing } func ensureRequest(f *framework.Framework, host string) { resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). End() Expect(errs).Should(BeEmpty()) Expect(resp.StatusCode).Should(Equal(http.StatusOK)) } +func ensureRequestWithStatus(f *framework.Framework, host string, statusCode int) { + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + End() + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(statusCode)) +} + +func runRequest(f *framework.Framework, host string) int { + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + End() + Expect(errs).Should(BeEmpty()) + return resp.StatusCode +} + func ensureHTTPSRequest(url string, host string, expectedDNSName string) { resp, _, errs := gorequest.New(). Get(url). 
diff --git a/test/e2e/servicebackend/service_backend.go b/test/e2e/servicebackend/service_backend.go index 3df7e89964..a508eeb140 100644 --- a/test/e2e/servicebackend/service_backend.go +++ b/test/e2e/servicebackend/service_backend.go @@ -44,7 +44,7 @@ var _ = framework.IngressNginxDescribe("Service backend - 503", func() { It("should return 503 when backend service does not exist", func() { host := "nonexistent.svc.com" - bi := buildIngressWithNonexistentService(host, f.IngressController.Namespace, "/") + bi := buildIngressWithNonexistentService(host, f.Namespace, "/") f.EnsureIngress(bi) f.WaitForNginxServer(host, @@ -53,7 +53,7 @@ var _ = framework.IngressNginxDescribe("Service backend - 503", func() { }) resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). End() Expect(errs).Should(BeEmpty()) @@ -63,7 +63,7 @@ var _ = framework.IngressNginxDescribe("Service backend - 503", func() { It("should return 503 when all backend service endpoints are unavailable", func() { host := "unavailable.svc.com" - bi, bs := buildIngressWithUnavailableServiceEndpoints(host, f.IngressController.Namespace, "/") + bi, bs := buildIngressWithUnavailableServiceEndpoints(host, f.Namespace, "/") svc := f.EnsureService(bs) Expect(svc).NotTo(BeNil()) @@ -76,7 +76,7 @@ var _ = framework.IngressNginxDescribe("Service backend - 503", func() { }) resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). 
End() Expect(errs).Should(BeEmpty()) diff --git a/test/e2e/settings/configmap_change.go b/test/e2e/settings/configmap_change.go index 4e19ca90d7..e19fdf319c 100644 --- a/test/e2e/settings/configmap_change.go +++ b/test/e2e/settings/configmap_change.go @@ -39,7 +39,7 @@ var _ = framework.IngressNginxDescribe("Configmap change", func() { It("should reload after an update in the configuration", func() { host := "configmap-change" - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, nil) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, nil) f.EnsureIngress(ing) wlKey := "whitelist-source-range" diff --git a/test/e2e/settings/disable_catch_all.go b/test/e2e/settings/disable_catch_all.go index fefc5183eb..32c3c4fbf8 100644 --- a/test/e2e/settings/disable_catch_all.go +++ b/test/e2e/settings/disable_catch_all.go @@ -37,12 +37,12 @@ var _ = framework.IngressNginxDescribe("Disabled catch-all", func() { BeforeEach(func() { f.NewEchoDeploymentWithReplicas(1) - framework.UpdateDeployment(f.KubeClientSet, f.IngressController.Namespace, "nginx-ingress-controller", 1, + framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 1, func(deployment *appsv1beta1.Deployment) error { args := deployment.Spec.Template.Spec.Containers[0].Args args = append(args, "--disable-catch-all=true") deployment.Spec.Template.Spec.Containers[0].Args = args - _, err := f.KubeClientSet.AppsV1beta1().Deployments(f.IngressController.Namespace).Update(deployment) + _, err := f.KubeClientSet.AppsV1beta1().Deployments(f.Namespace).Update(deployment) return err }) @@ -54,10 +54,10 @@ var _ = framework.IngressNginxDescribe("Disabled catch-all", func() { It("should ignore catch all Ingress", func() { host := "foo" - ing := framework.NewSingleCatchAllIngress("catch-all", f.IngressController.Namespace, "http-svc", 80, nil) + ing := framework.NewSingleCatchAllIngress("catch-all", f.Namespace, "http-svc", 80, 
nil) f.EnsureIngress(ing) - ing = framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, nil) + ing = framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, nil) f.EnsureIngress(ing) f.WaitForNginxServer(host, func(cfg string) bool { @@ -73,7 +73,7 @@ var _ = framework.IngressNginxDescribe("Disabled catch-all", func() { It("should delete Ingress updated to catch-all", func() { host := "foo" - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, nil) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, nil) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -82,13 +82,13 @@ var _ = framework.IngressNginxDescribe("Disabled catch-all", func() { }) resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). End() Expect(errs).To(BeNil()) Expect(resp.StatusCode).Should(Equal(http.StatusOK)) - err := framework.UpdateIngress(f.KubeClientSet, f.IngressController.Namespace, host, func(ingress *extensions.Ingress) error { + err := framework.UpdateIngress(f.KubeClientSet, f.Namespace, host, func(ingress *extensions.Ingress) error { ingress.Spec.Rules = nil ingress.Spec.Backend = &extensions.IngressBackend{ ServiceName: "http-svc", @@ -103,10 +103,30 @@ var _ = framework.IngressNginxDescribe("Disabled catch-all", func() { }) resp, _, errs = gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). 
End() Expect(errs).To(BeNil()) Expect(resp.StatusCode).Should(Equal(http.StatusNotFound)) }) + + It("should allow Ingress with both a default backend and rules", func() { + host := "foo" + + ing := framework.NewSingleIngressWithBackendAndRules("not-catch-all", "/rulepath", host, f.Namespace, "http-svc", 80, "http-svc", 80, nil) + f.EnsureIngress(ing) + + f.WaitForNginxServer(host, func(cfg string) bool { + return strings.Contains(cfg, "server_name foo") + }) + + resp, _, errs := gorequest.New(). + Get(f.GetURL(framework.HTTP)). + Set("Host", host). + End() + + Expect(errs).To(BeNil()) + Expect(resp.StatusCode).Should(Equal(http.StatusOK)) + + }) }) diff --git a/test/e2e/settings/forwarded_headers.go b/test/e2e/settings/forwarded_headers.go index b08ee075ee..4bbdf952f1 100644 --- a/test/e2e/settings/forwarded_headers.go +++ b/test/e2e/settings/forwarded_headers.go @@ -46,7 +46,7 @@ var _ = framework.IngressNginxDescribe("X-Forwarded headers", func() { f.UpdateNginxConfigMapData(setting, "true") - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, nil) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, nil) f.EnsureIngress(ing) f.WaitForNginxServer(host, @@ -55,7 +55,7 @@ var _ = framework.IngressNginxDescribe("X-Forwarded headers", func() { }) resp, body, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). Set("X-Forwarded-Port", "1234"). Set("X-Forwarded-Proto", "myproto"). 
@@ -76,7 +76,7 @@ var _ = framework.IngressNginxDescribe("X-Forwarded headers", func() { f.UpdateNginxConfigMapData(setting, "false") - f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, nil)) + f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, nil)) f.WaitForNginxServer(host, func(server string) bool { @@ -84,7 +84,7 @@ var _ = framework.IngressNginxDescribe("X-Forwarded headers", func() { }) resp, body, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). Set("X-Forwarded-Port", "1234"). Set("X-Forwarded-Proto", "myproto"). diff --git a/test/e2e/settings/geoip2.go b/test/e2e/settings/geoip2.go index 58f657aff7..6c464c78de 100644 --- a/test/e2e/settings/geoip2.go +++ b/test/e2e/settings/geoip2.go @@ -61,7 +61,7 @@ var _ = framework.IngressNginxDescribe("Geoip2", func() { "nginx.ingress.kubernetes.io/configuration-snippet": configSnippet, } - f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations)) + f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations)) f.WaitForNginxConfiguration( func(cfg string) bool { @@ -71,7 +71,7 @@ var _ = framework.IngressNginxDescribe("Geoip2", func() { // Should be blocked usIP := "8.8.8.8" resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). Set("X-Forwarded-For", usIP). End() @@ -81,7 +81,7 @@ var _ = framework.IngressNginxDescribe("Geoip2", func() { // Shouldn't be blocked australianIP := "1.1.1.1" resp, _, errs = gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). Set("X-Forwarded-For", australianIP). 
End() diff --git a/test/e2e/settings/global_access_block.go b/test/e2e/settings/global_access_block.go index 924283b18b..3e1abb94fc 100644 --- a/test/e2e/settings/global_access_block.go +++ b/test/e2e/settings/global_access_block.go @@ -34,7 +34,7 @@ var _ = framework.IngressNginxDescribe("Global access block", func() { BeforeEach(func() { f.NewEchoDeploymentWithReplicas(1) - f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, nil)) + f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, nil)) }) AfterEach(func() { @@ -53,7 +53,7 @@ var _ = framework.IngressNginxDescribe("Global access block", func() { // This test works for minikube, but may have problems with real kubernetes clusters, // especially if connection is done via Internet. In this case, the test should be disabled/removed. resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). End() Expect(errs).To(BeNil()) @@ -71,7 +71,7 @@ var _ = framework.IngressNginxDescribe("Global access block", func() { // Should be blocked resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). Set("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36"). End() @@ -79,7 +79,7 @@ var _ = framework.IngressNginxDescribe("Global access block", func() { Expect(resp.StatusCode).Should(Equal(http.StatusForbidden)) resp, _, errs = gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). Set("User-Agent", "AlphaBot"). End() @@ -88,7 +88,7 @@ var _ = framework.IngressNginxDescribe("Global access block", func() { // Shouldn't be blocked resp, _, errs = gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). 
Set("User-Agent", "Mozilla/5.0 (iPhone; CPU iPhone OS 11_4_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.0 Mobile/15E148 Safari/604.1"). End() @@ -107,7 +107,7 @@ var _ = framework.IngressNginxDescribe("Global access block", func() { // Should be blocked resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). Set("Referer", "example.com"). End() @@ -115,7 +115,7 @@ var _ = framework.IngressNginxDescribe("Global access block", func() { Expect(resp.StatusCode).Should(Equal(http.StatusForbidden)) resp, _, errs = gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). Set("Referer", "qwerty"). End() @@ -124,7 +124,7 @@ var _ = framework.IngressNginxDescribe("Global access block", func() { // Shouldn't be blocked resp, _, errs = gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). Set("Referer", "qwerty123"). 
End() diff --git a/test/e2e/settings/ingress_class.go b/test/e2e/settings/ingress_class.go index 69032d3083..772429cd2c 100644 --- a/test/e2e/settings/ingress_class.go +++ b/test/e2e/settings/ingress_class.go @@ -45,11 +45,11 @@ var _ = framework.IngressNginxDescribe("Ingress class", func() { annotations := map[string]string{ "kubernetes.io/ingress.class": "testclass", } - ing := framework.NewSingleIngress(invalidHost, "/", invalidHost, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(invalidHost, "/", invalidHost, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) validHost := "bar" - ing = framework.NewSingleIngress(validHost, "/", validHost, f.IngressController.Namespace, "http-svc", 80, nil) + ing = framework.NewSingleIngress(validHost, "/", validHost, f.Namespace, "http-svc", 80, nil) f.EnsureIngress(ing) f.WaitForNginxConfiguration(func(cfg string) bool { @@ -58,14 +58,14 @@ var _ = framework.IngressNginxDescribe("Ingress class", func() { }) resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", invalidHost). End() Expect(errs).To(BeNil()) Expect(resp.StatusCode).Should(Equal(http.StatusNotFound)) resp, _, errs = gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", validHost). 
End() Expect(errs).To(BeNil()) @@ -75,12 +75,12 @@ var _ = framework.IngressNginxDescribe("Ingress class", func() { Context("With a specific ingress-class", func() { BeforeEach(func() { - framework.UpdateDeployment(f.KubeClientSet, f.IngressController.Namespace, "nginx-ingress-controller", 1, + framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 1, func(deployment *appsv1beta1.Deployment) error { args := deployment.Spec.Template.Spec.Containers[0].Args args = append(args, "--ingress-class=testclass") deployment.Spec.Template.Spec.Containers[0].Args = args - _, err := f.KubeClientSet.AppsV1beta1().Deployments(f.IngressController.Namespace).Update(deployment) + _, err := f.KubeClientSet.AppsV1beta1().Deployments(f.Namespace).Update(deployment) return err }) @@ -89,14 +89,14 @@ var _ = framework.IngressNginxDescribe("Ingress class", func() { It("should ignore Ingress with no class", func() { invalidHost := "bar" - ing := framework.NewSingleIngress(invalidHost, "/", invalidHost, f.IngressController.Namespace, "http-svc", 80, nil) + ing := framework.NewSingleIngress(invalidHost, "/", invalidHost, f.Namespace, "http-svc", 80, nil) f.EnsureIngress(ing) validHost := "foo" annotations := map[string]string{ "kubernetes.io/ingress.class": "testclass", } - ing = framework.NewSingleIngress(validHost, "/", validHost, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing = framework.NewSingleIngress(validHost, "/", validHost, f.Namespace, "http-svc", 80, &annotations) f.EnsureIngress(ing) f.WaitForNginxServer(validHost, func(cfg string) bool { @@ -108,14 +108,14 @@ var _ = framework.IngressNginxDescribe("Ingress class", func() { }) resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", validHost). End() Expect(errs).To(BeNil()) Expect(resp.StatusCode).Should(Equal(http.StatusOK)) resp, _, errs = gorequest.New(). - Get(f.IngressController.HTTPURL). 
+ Get(f.GetURL(framework.HTTP)). Set("Host", invalidHost). End() Expect(errs).To(BeNil()) @@ -127,7 +127,7 @@ var _ = framework.IngressNginxDescribe("Ingress class", func() { annotations := map[string]string{ "kubernetes.io/ingress.class": "testclass", } - ing := framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, &annotations) + ing := framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, &annotations) ing = f.EnsureIngress(ing) f.WaitForNginxServer(host, func(cfg string) bool { @@ -135,7 +135,7 @@ var _ = framework.IngressNginxDescribe("Ingress class", func() { }) resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). End() Expect(errs).To(BeNil()) @@ -149,7 +149,7 @@ var _ = framework.IngressNginxDescribe("Ingress class", func() { }) resp, _, errs = gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). End() Expect(errs).To(BeNil()) diff --git a/test/e2e/settings/no_auth_locations.go b/test/e2e/settings/no_auth_locations.go index 546df30432..49aa3af076 100644 --- a/test/e2e/settings/no_auth_locations.go +++ b/test/e2e/settings/no_auth_locations.go @@ -45,11 +45,11 @@ var _ = framework.IngressNginxDescribe("No Auth locations", func() { BeforeEach(func() { f.NewEchoDeployment() - s := f.EnsureSecret(buildSecret(username, password, secretName, f.IngressController.Namespace)) + s := f.EnsureSecret(buildSecret(username, password, secretName, f.Namespace)) f.UpdateNginxConfigMapData(setting, noAuthPath) - bi := buildBasicAuthIngressWithSecondPath(host, f.IngressController.Namespace, s.Name, noAuthPath) + bi := buildBasicAuthIngressWithSecondPath(host, f.Namespace, s.Name, noAuthPath) f.EnsureIngress(bi) }) @@ -63,7 +63,7 @@ var _ = framework.IngressNginxDescribe("No Auth locations", func() { }) resp, body, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). 
+ Get(f.GetURL(framework.HTTP)). Set("Host", host). End() @@ -79,7 +79,7 @@ var _ = framework.IngressNginxDescribe("No Auth locations", func() { }) resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", host). SetBasicAuth(username, password). End() @@ -95,7 +95,7 @@ var _ = framework.IngressNginxDescribe("No Auth locations", func() { }) resp, _, errs := gorequest.New(). - Get(fmt.Sprintf("%s/noauth", f.IngressController.HTTPURL)). + Get(fmt.Sprintf("%s/noauth", f.GetURL(framework.HTTP))). Set("Host", host). End() diff --git a/test/e2e/settings/pod_security_policy.go b/test/e2e/settings/pod_security_policy.go index 214b1ee88d..eec563e617 100644 --- a/test/e2e/settings/pod_security_policy.go +++ b/test/e2e/settings/pod_security_policy.go @@ -63,12 +63,12 @@ var _ = framework.IngressNginxDescribe("[Serial] Pod Security Policies", func() Expect(err).NotTo(HaveOccurred(), "updating ingress controller cluster role to use a pod security policy") // update the deployment just to trigger a rolling update and the use of the security policy - err = framework.UpdateDeployment(f.KubeClientSet, f.IngressController.Namespace, "nginx-ingress-controller", 1, + err = framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 1, func(deployment *appsv1beta1.Deployment) error { args := deployment.Spec.Template.Spec.Containers[0].Args args = append(args, "--v=2") deployment.Spec.Template.Spec.Containers[0].Args = args - _, err := f.KubeClientSet.AppsV1beta1().Deployments(f.IngressController.Namespace).Update(deployment) + _, err := f.KubeClientSet.AppsV1beta1().Deployments(f.Namespace).Update(deployment) return err }) @@ -108,7 +108,7 @@ var _ = framework.IngressNginxDescribe("[Serial] Pod Security Policies", func() }) resp, _, _ := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). Set("Host", "foo.bar.com"). 
End() Expect(resp.StatusCode).Should(Equal(http.StatusNotFound)) diff --git a/test/e2e/settings/proxy_host.go b/test/e2e/settings/proxy_host.go index 2ae17b49e3..d9789e7fd0 100644 --- a/test/e2e/settings/proxy_host.go +++ b/test/e2e/settings/proxy_host.go @@ -20,6 +20,7 @@ import ( "fmt" "net/http" "strings" + "time" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -37,11 +38,11 @@ var _ = framework.IngressNginxDescribe("Proxy host variable", func() { }) It("should exist a proxy_host", func() { - upstreamName := fmt.Sprintf("%v-http-svc-80", f.IngressController.Namespace) + upstreamName := fmt.Sprintf("%v-http-svc-80", f.Namespace) annotations := map[string]string{ "nginx.ingress.kubernetes.io/configuration-snippet": `more_set_headers "Custom-Header: $proxy_host"`, } - f.EnsureIngress(framework.NewSingleIngress(test, "/", test, f.IngressController.Namespace, "http-svc", 80, &annotations)) + f.EnsureIngress(framework.NewSingleIngress(test, "/", test, f.Namespace, "http-svc", 80, &annotations)) f.WaitForNginxConfiguration( func(server string) bool { @@ -50,7 +51,8 @@ var _ = framework.IngressNginxDescribe("Proxy host variable", func() { }) resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). + Retry(10, 1*time.Second, http.StatusNotFound). Set("Host", test). 
End() @@ -60,13 +62,13 @@ var _ = framework.IngressNginxDescribe("Proxy host variable", func() { }) It("should exist a proxy_host using the upstream-vhost annotation value", func() { - upstreamName := fmt.Sprintf("%v-http-svc-80", f.IngressController.Namespace) + upstreamName := fmt.Sprintf("%v-http-svc-80", f.Namespace) upstreamVHost := "different.host" annotations := map[string]string{ "nginx.ingress.kubernetes.io/upstream-vhost": upstreamVHost, "nginx.ingress.kubernetes.io/configuration-snippet": `more_set_headers "Custom-Header: $proxy_host"`, } - f.EnsureIngress(framework.NewSingleIngress(test, "/", test, f.IngressController.Namespace, "http-svc", 80, &annotations)) + f.EnsureIngress(framework.NewSingleIngress(test, "/", test, f.Namespace, "http-svc", 80, &annotations)) f.WaitForNginxConfiguration( func(server string) bool { @@ -75,7 +77,8 @@ var _ = framework.IngressNginxDescribe("Proxy host variable", func() { }) resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPURL). + Get(f.GetURL(framework.HTTP)). + Retry(10, 1*time.Second, http.StatusNotFound). Set("Host", test). End() diff --git a/test/e2e/settings/proxy_protocol.go b/test/e2e/settings/proxy_protocol.go index 8a29565156..95fdde595b 100644 --- a/test/e2e/settings/proxy_protocol.go +++ b/test/e2e/settings/proxy_protocol.go @@ -20,7 +20,6 @@ import ( "fmt" "io/ioutil" "net" - "strconv" "strings" . 
"github.com/onsi/ginkgo" @@ -47,7 +46,7 @@ var _ = framework.IngressNginxDescribe("Proxy Protocol", func() { f.UpdateNginxConfigMapData(setting, "true") - f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, nil)) + f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, nil)) f.WaitForNginxServer(host, func(server string) bool { @@ -56,11 +55,9 @@ var _ = framework.IngressNginxDescribe("Proxy Protocol", func() { }) ip := f.GetNginxIP() - port, err := f.GetNginxPort("http") - Expect(err).NotTo(HaveOccurred(), "unexpected error obtaning NGINX Port") - conn, err := net.Dial("tcp", net.JoinHostPort(ip, strconv.Itoa(port))) - Expect(err).NotTo(HaveOccurred(), "unexpected error creating connection to %s:%d", ip, port) + conn, err := net.Dial("tcp", net.JoinHostPort(ip, "80")) + Expect(err).NotTo(HaveOccurred(), "unexpected error creating connection to %s:80", ip) defer conn.Close() header := "PROXY TCP4 192.168.0.1 192.168.0.11 56324 1234\r\n" diff --git a/test/e2e/settings/server_tokens.go b/test/e2e/settings/server_tokens.go index 2189f2f798..d1abe760cd 100644 --- a/test/e2e/settings/server_tokens.go +++ b/test/e2e/settings/server_tokens.go @@ -41,7 +41,7 @@ var _ = framework.IngressNginxDescribe("Server Tokens", func() { It("should not exists Server header in the response", func() { f.UpdateNginxConfigMapData(serverTokens, "false") - f.EnsureIngress(framework.NewSingleIngress(serverTokens, "/", serverTokens, f.IngressController.Namespace, "http-svc", 80, nil)) + f.EnsureIngress(framework.NewSingleIngress(serverTokens, "/", serverTokens, f.Namespace, "http-svc", 80, nil)) f.WaitForNginxConfiguration( func(cfg string) bool { @@ -56,7 +56,7 @@ var _ = framework.IngressNginxDescribe("Server Tokens", func() { f.EnsureIngress(&v1beta1.Ingress{ ObjectMeta: metav1.ObjectMeta{ Name: serverTokens, - Namespace: f.IngressController.Namespace, + Namespace: f.Namespace, Annotations: 
map[string]string{}, }, Spec: v1beta1.IngressSpec{ diff --git a/test/e2e/settings/tls.go b/test/e2e/settings/tls.go index 95101ce2b1..67a7b45074 100644 --- a/test/e2e/settings/tls.go +++ b/test/e2e/settings/tls.go @@ -21,6 +21,7 @@ import ( "fmt" "net/http" "strings" + "time" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -29,12 +30,17 @@ import ( "k8s.io/ingress-nginx/test/e2e/framework" ) +func noRedirectPolicyFunc(gorequest.Request, []gorequest.Request) error { + return http.ErrUseLastResponse +} + var _ = framework.IngressNginxDescribe("Settings - TLS)", func() { f := framework.NewDefaultFramework("settings-tls") host := "settings-tls" BeforeEach(func() { f.NewEchoDeployment() + f.UpdateNginxConfigMapData("use-forwarded-headers", "false") }) AfterEach(func() { @@ -48,14 +54,14 @@ var _ = framework.IngressNginxDescribe("Settings - TLS)", func() { // https://www.openssl.org/docs/man1.1.0/apps/ciphers.html - "CIPHER SUITE NAMES" testCiphers := "ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA" - ing := f.EnsureIngress(framework.NewSingleIngressWithTLS(host, "/", host, []string{host}, f.IngressController.Namespace, "http-svc", 80, nil)) + ing := f.EnsureIngress(framework.NewSingleIngressWithTLS(host, "/", host, []string{host}, f.Namespace, "http-svc", 80, nil)) tlsConfig, err := framework.CreateIngressTLSSecret(f.KubeClientSet, ing.Spec.TLS[0].Hosts, ing.Spec.TLS[0].SecretName, ing.Namespace) Expect(err).NotTo(HaveOccurred()) - framework.WaitForTLS(f.IngressController.HTTPSURL, tlsConfig) + framework.WaitForTLS(f.GetURL(framework.HTTPS), tlsConfig) By("setting cipher suite") f.UpdateNginxConfigMapData(sslCiphers, testCiphers) @@ -66,7 +72,7 @@ var _ = framework.IngressNginxDescribe("Settings - TLS)", func() { }) resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPSURL). + Get(f.GetURL(framework.HTTPS)). TLSClientConfig(tlsConfig). Set("Host", host). 
End() @@ -85,7 +91,7 @@ var _ = framework.IngressNginxDescribe("Settings - TLS)", func() { }) resp, _, errs = gorequest.New(). - Get(f.IngressController.HTTPSURL). + Get(f.GetURL(framework.HTTPS)). TLSClientConfig(tlsConfig). Set("Host", host). End() @@ -101,14 +107,14 @@ var _ = framework.IngressNginxDescribe("Settings - TLS)", func() { hstsIncludeSubdomains := "hsts-include-subdomains" hstsPreload := "hsts-preload" - ing := f.EnsureIngress(framework.NewSingleIngressWithTLS(host, "/", host, []string{host}, f.IngressController.Namespace, "http-svc", 80, nil)) + ing := f.EnsureIngress(framework.NewSingleIngressWithTLS(host, "/", host, []string{host}, f.Namespace, "http-svc", 80, nil)) tlsConfig, err := framework.CreateIngressTLSSecret(f.KubeClientSet, ing.Spec.TLS[0].Hosts, ing.Spec.TLS[0].SecretName, ing.Namespace) Expect(err).NotTo(HaveOccurred()) - framework.WaitForTLS(f.IngressController.HTTPSURL, tlsConfig) + framework.WaitForTLS(f.GetURL(framework.HTTPS), tlsConfig) By("setting max-age parameter") f.UpdateNginxConfigMapData(hstsMaxAge, "86400") @@ -119,7 +125,7 @@ var _ = framework.IngressNginxDescribe("Settings - TLS)", func() { }) resp, _, errs := gorequest.New(). - Get(f.IngressController.HTTPSURL). + Get(f.GetURL(framework.HTTPS)). TLSClientConfig(tlsConfig). Set("Host", host). End() @@ -137,7 +143,7 @@ var _ = framework.IngressNginxDescribe("Settings - TLS)", func() { }) resp, _, errs = gorequest.New(). - Get(f.IngressController.HTTPSURL). + Get(f.GetURL(framework.HTTPS)). TLSClientConfig(tlsConfig). Set("Host", host). End() @@ -155,7 +161,7 @@ var _ = framework.IngressNginxDescribe("Settings - TLS)", func() { }) resp, _, errs = gorequest.New(). - Get(f.IngressController.HTTPSURL). + Get(f.GetURL(framework.HTTPS)). TLSClientConfig(tlsConfig). Set("Host", host). 
End() @@ -164,4 +170,63 @@ var _ = framework.IngressNginxDescribe("Settings - TLS)", func() { Expect(resp.StatusCode).Should(Equal(http.StatusOK)) Expect(resp.Header.Get("Strict-Transport-Security")).Should(ContainSubstring("preload")) }) + + It("should not use ports during the HTTP to HTTPS redirection", func() { + ing := f.EnsureIngress(framework.NewSingleIngressWithTLS(host, "/", host, []string{host}, f.Namespace, "http-svc", 80, nil)) + tlsConfig, err := framework.CreateIngressTLSSecret(f.KubeClientSet, + ing.Spec.TLS[0].Hosts, + ing.Spec.TLS[0].SecretName, + ing.Namespace) + Expect(err).NotTo(HaveOccurred()) + + framework.WaitForTLS(f.GetURL(framework.HTTPS), tlsConfig) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring(`if ($redirect_to_https) {`)) && + Expect(server).Should(ContainSubstring(`return 308 https://$redirect_host$request_uri;`)) + }) + + resp, _, errs := gorequest.New(). + Get(fmt.Sprintf(f.GetURL(framework.HTTP))). + Retry(10, 1*time.Second, http.StatusNotFound). + RedirectPolicy(noRedirectPolicyFunc). + Set("Host", host). 
+ End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusPermanentRedirect)) + Expect(resp.Header.Get("Location")).Should(Equal(fmt.Sprintf("https://%v/", host))) + }) + + It("should not use ports or X-Forwarded-Host during the HTTP to HTTPS redirection", func() { + f.UpdateNginxConfigMapData("use-forwarded-headers", "true") + + ing := f.EnsureIngress(framework.NewSingleIngressWithTLS(host, "/", host, []string{host}, f.Namespace, "http-svc", 80, nil)) + tlsConfig, err := framework.CreateIngressTLSSecret(f.KubeClientSet, + ing.Spec.TLS[0].Hosts, + ing.Spec.TLS[0].SecretName, + ing.Namespace) + Expect(err).NotTo(HaveOccurred()) + + framework.WaitForTLS(f.GetURL(framework.HTTPS), tlsConfig) + + f.WaitForNginxServer(host, + func(server string) bool { + return Expect(server).Should(ContainSubstring(`if ($redirect_to_https) {`)) && + Expect(server).Should(ContainSubstring(`return 308 https://$redirect_host$request_uri;`)) + }) + + resp, _, errs := gorequest.New(). + Get(fmt.Sprintf(f.GetURL(framework.HTTP))). + Retry(10, 1*time.Second, http.StatusNotFound). + RedirectPolicy(noRedirectPolicyFunc). + Set("Host", host). + Set("X-Forwarded-Host", "example.com:80"). 
+ End() + + Expect(errs).Should(BeEmpty()) + Expect(resp.StatusCode).Should(Equal(http.StatusPermanentRedirect)) + Expect(resp.Header.Get("Location")).Should(Equal("https://example.com/")) + }) }) diff --git a/test/e2e/ssl/secret_update.go b/test/e2e/ssl/secret_update.go index 36026955f2..ed77c6b6cd 100644 --- a/test/e2e/ssl/secret_update.go +++ b/test/e2e/ssl/secret_update.go @@ -45,14 +45,14 @@ var _ = framework.IngressNginxDescribe("SSL", func() { dummySecret := f.EnsureSecret(&v1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "dummy", - Namespace: f.IngressController.Namespace, + Namespace: f.Namespace, }, Data: map[string][]byte{ "key": []byte("value"), }, }) - ing := f.EnsureIngress(framework.NewSingleIngressWithTLS(host, "/", host, []string{host}, f.IngressController.Namespace, "http-svc", 80, nil)) + ing := f.EnsureIngress(framework.NewSingleIngressWithTLS(host, "/", host, []string{host}, f.Namespace, "http-svc", 80, nil)) _, err := framework.CreateIngressTLSSecret(f.KubeClientSet, ing.Spec.TLS[0].Hosts, ing.Spec.TLS[0].SecretName, @@ -69,12 +69,12 @@ var _ = framework.IngressNginxDescribe("SSL", func() { Expect(err).ToNot(HaveOccurred()) Expect(log).ToNot(BeEmpty()) - Expect(log).ToNot(ContainSubstring(fmt.Sprintf("starting syncing of secret %v/dummy", f.IngressController.Namespace))) + Expect(log).ToNot(ContainSubstring(fmt.Sprintf("starting syncing of secret %v/dummy", f.Namespace))) time.Sleep(5 * time.Second) dummySecret.Data["some-key"] = []byte("some value") - f.KubeClientSet.CoreV1().Secrets(f.IngressController.Namespace).Update(dummySecret) + f.KubeClientSet.CoreV1().Secrets(f.Namespace).Update(dummySecret) time.Sleep(5 * time.Second) - Expect(log).ToNot(ContainSubstring(fmt.Sprintf("starting syncing of secret %v/dummy", f.IngressController.Namespace))) - Expect(log).ToNot(ContainSubstring(fmt.Sprintf("error obtaining PEM from secret %v/dummy", f.IngressController.Namespace))) + Expect(log).ToNot(ContainSubstring(fmt.Sprintf("starting syncing of 
secret %v/dummy", f.Namespace))) + Expect(log).ToNot(ContainSubstring(fmt.Sprintf("error obtaining PEM from secret %v/dummy", f.Namespace))) }) }) diff --git a/test/e2e/status/update.go b/test/e2e/status/update.go index 062c3cb7ee..c28122a0ff 100644 --- a/test/e2e/status/update.go +++ b/test/e2e/status/update.go @@ -49,7 +49,7 @@ var _ = framework.IngressNginxDescribe("Status Update [Status]", func() { port, cmd, err := f.KubectlProxy(0) Expect(err).NotTo(HaveOccurred(), "unexpected error starting kubectl proxy") - err = framework.UpdateDeployment(f.KubeClientSet, f.IngressController.Namespace, "nginx-ingress-controller", 1, + err = framework.UpdateDeployment(f.KubeClientSet, f.Namespace, "nginx-ingress-controller", 1, func(deployment *appsv1beta1.Deployment) error { args := deployment.Spec.Template.Spec.Containers[0].Args args = append(args, fmt.Sprintf("--apiserver-host=http://%s:%d", address.String(), port)) @@ -67,14 +67,14 @@ var _ = framework.IngressNginxDescribe("Status Update [Status]", func() { } deployment.Spec.Template.Spec.Containers[0].Args = args - _, err := f.KubeClientSet.AppsV1beta1().Deployments(f.IngressController.Namespace).Update(deployment) + _, err := f.KubeClientSet.AppsV1beta1().Deployments(f.Namespace).Update(deployment) return err }) Expect(err).NotTo(HaveOccurred(), "unexpected error updating ingress controller deployment flags") f.NewEchoDeploymentWithReplicas(1) - ing := f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.IngressController.Namespace, "http-svc", 80, nil)) + ing := f.EnsureIngress(framework.NewSingleIngress(host, "/", host, f.Namespace, "http-svc", 80, nil)) f.WaitForNginxConfiguration( func(cfg string) bool { @@ -87,16 +87,16 @@ var _ = framework.IngressNginxDescribe("Status Update [Status]", func() { err = cmd.Process.Kill() Expect(err).NotTo(HaveOccurred(), "unexpected error terminating kubectl proxy") - ing, err = f.KubeClientSet.Extensions().Ingresses(f.IngressController.Namespace).Get(host, 
metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred(), "unexpected error getting %s/%v Ingress", f.IngressController.Namespace, host) + ing, err = f.KubeClientSet.Extensions().Ingresses(f.Namespace).Get(host, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred(), "unexpected error getting %s/%v Ingress", f.Namespace, host) ing.Status.LoadBalancer.Ingress = []apiv1.LoadBalancerIngress{} - _, err = f.KubeClientSet.Extensions().Ingresses(f.IngressController.Namespace).UpdateStatus(ing) + _, err = f.KubeClientSet.Extensions().Ingresses(f.Namespace).UpdateStatus(ing) Expect(err).NotTo(HaveOccurred(), "unexpected error cleaning Ingress status") time.Sleep(10 * time.Second) err = f.KubeClientSet.CoreV1(). - ConfigMaps(f.IngressController.Namespace). + ConfigMaps(f.Namespace). Delete("ingress-controller-leader-nginx", &metav1.DeleteOptions{}) Expect(err).NotTo(HaveOccurred(), "unexpected error deleting leader election configmap") @@ -110,9 +110,9 @@ var _ = framework.IngressNginxDescribe("Status Update [Status]", func() { }() err = wait.Poll(10*time.Second, framework.DefaultTimeout, func() (done bool, err error) { - ing, err = f.KubeClientSet.Extensions().Ingresses(f.IngressController.Namespace).Get(host, metav1.GetOptions{}) + ing, err = f.KubeClientSet.Extensions().Ingresses(f.Namespace).Get(host, metav1.GetOptions{}) if err != nil { - return false, err + return false, nil } if len(ing.Status.LoadBalancer.Ingress) != 1 { diff --git a/test/e2e/tcpudp/tcp.go b/test/e2e/tcpudp/tcp.go index cebb90be27..8a9fa36b84 100644 --- a/test/e2e/tcpudp/tcp.go +++ b/test/e2e/tcpudp/tcp.go @@ -17,10 +17,14 @@ limitations under the License. 
package settings import ( + "context" "fmt" + "net" "strings" + "time" "github.com/parnurzeal/gorequest" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -31,6 +35,10 @@ import ( "k8s.io/ingress-nginx/test/e2e/framework" ) +const ( + waitForLuaSync = 5 * time.Second +) + var _ = framework.IngressNginxDescribe("TCP Feature", func() { f := framework.NewDefaultFramework("tcp") @@ -45,7 +53,7 @@ var _ = framework.IngressNginxDescribe("TCP Feature", func() { config, err := f.KubeClientSet. CoreV1(). - ConfigMaps(f.IngressController.Namespace). + ConfigMaps(f.Namespace). Get("tcp-services", metav1.GetOptions{}) Expect(err).To(BeNil(), "unexpected error obtaining tcp-services configmap") Expect(config).NotTo(BeNil(), "expected a configmap but none returned") @@ -54,16 +62,17 @@ var _ = framework.IngressNginxDescribe("TCP Feature", func() { config.Data = map[string]string{} } - config.Data["8080"] = fmt.Sprintf("%v/http-svc:80", f.IngressController.Namespace) + config.Data["8080"] = fmt.Sprintf("%v/http-svc:80", f.Namespace) + _, err = f.KubeClientSet. CoreV1(). - ConfigMaps(f.IngressController.Namespace). + ConfigMaps(f.Namespace). Update(config) Expect(err).NotTo(HaveOccurred(), "unexpected error updating configmap") svc, err := f.KubeClientSet. CoreV1(). - Services(f.IngressController.Namespace). + Services(f.Namespace). Get("ingress-nginx", metav1.GetOptions{}) Expect(err).To(BeNil(), "unexpected error obtaining ingress-nginx service") Expect(svc).NotTo(BeNil(), "expected a service but none returned") @@ -75,23 +84,109 @@ var _ = framework.IngressNginxDescribe("TCP Feature", func() { }) _, err = f.KubeClientSet. CoreV1(). - Services(f.IngressController.Namespace). + Services(f.Namespace). 
Update(svc) Expect(err).NotTo(HaveOccurred(), "unexpected error updating service") f.WaitForNginxConfiguration( func(cfg string) bool { - return strings.Contains(cfg, fmt.Sprintf(`ngx.var.proxy_upstream_name="tcp-%v-http-svc-80"`, f.IngressController.Namespace)) + return strings.Contains(cfg, fmt.Sprintf(`ngx.var.proxy_upstream_name="tcp-%v-http-svc-80"`, f.Namespace)) }) ip := f.GetNginxIP() - port, err := f.GetNginxPort("http-svc") - Expect(err).NotTo(HaveOccurred(), "unexpected error obtaning service port") - resp, _, errs := gorequest.New(). - Get(fmt.Sprintf("http://%v:%v", ip, port)). + Get(fmt.Sprintf("http://%v:8080", ip)). End() Expect(errs).Should(BeEmpty()) Expect(resp.StatusCode).Should(Equal(200)) }) + + It("should expose an ExternalName TCP service", func() { + // Setup: + // - Create an external name service for DNS lookups on port 5353. Point it to google's DNS server + // - Expose port 5353 on the nginx ingress NodePort service to open a hole for this test + // - Update the `tcp-services` configmap to proxy traffic to the configured external name service + + // Create an external service for DNS + externalService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dns-external-name-svc", + Namespace: f.Namespace, + }, + + Spec: corev1.ServiceSpec{ + ExternalName: "google-public-dns-a.google.com", + Ports: []corev1.ServicePort{ + { + Name: "dns-external-name-svc", + Port: 5353, + TargetPort: intstr.FromInt(53), + }, + }, + Type: corev1.ServiceTypeExternalName, + }, + } + f.EnsureService(externalService) + + // Expose the `external name` port on the `ingress-nginx` service + svc, err := f.KubeClientSet. + CoreV1(). + Services(f.Namespace). 
+ Get("ingress-nginx", metav1.GetOptions{}) + Expect(err).To(BeNil(), "unexpected error obtaining ingress-nginx service") + Expect(svc).NotTo(BeNil(), "expected a service but none returned") + + svc.Spec.Ports = append(svc.Spec.Ports, corev1.ServicePort{ + Name: "dns-svc", + Port: 5353, + TargetPort: intstr.FromInt(5353), + }) + _, err = f.KubeClientSet. + CoreV1(). + Services(f.Namespace). + Update(svc) + Expect(err).NotTo(HaveOccurred(), "unexpected error updating service") + + // Update the TCP configmap to link port 5353 to the DNS external name service + config, err := f.KubeClientSet. + CoreV1(). + ConfigMaps(f.Namespace). + Get("tcp-services", metav1.GetOptions{}) + Expect(err).To(BeNil(), "unexpected error obtaining tcp-services configmap") + Expect(config).NotTo(BeNil(), "expected a configmap but none returned") + + if config.Data == nil { + config.Data = map[string]string{} + } + + config.Data["5353"] = fmt.Sprintf("%v/dns-external-name-svc:5353", f.Namespace) + + _, err = f.KubeClientSet. + CoreV1(). + ConfigMaps(f.Namespace). + Update(config) + Expect(err).NotTo(HaveOccurred(), "unexpected error updating configmap") + + time.Sleep(waitForLuaSync) + + // Validate that the generated nginx config contains the expected `proxy_upstream_name` value + f.WaitForNginxConfiguration( + func(cfg string) bool { + return strings.Contains(cfg, fmt.Sprintf(`ngx.var.proxy_upstream_name="tcp-%v-dns-external-name-svc-5353"`, f.Namespace)) + }) + + // Execute the test. Use the `external name` service to resolve a domain name. 
+ ip := f.GetNginxIP() + resolver := net.Resolver{ + PreferGo: true, + Dial: func(ctx context.Context, network, address string) (net.Conn, error) { + d := net.Dialer{} + return d.DialContext(ctx, "tcp", fmt.Sprintf("%v:5353", ip)) + }, + } + ips, err := resolver.LookupHost(context.Background(), "google-public-dns-b.google.com") + Expect(err).NotTo(HaveOccurred(), "unexpected error from DNS resolver") + Expect(ips).Should(ContainElement("8.8.4.4")) + + }) }) diff --git a/test/e2e/up.sh b/test/e2e/up.sh index 31a24b33a9..3914cb2c0b 100755 --- a/test/e2e/up.sh +++ b/test/e2e/up.sh @@ -22,13 +22,12 @@ if test -e kubectl; then echo "skipping download of kubectl" else echo "downloading kubectl..." - curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/v1.12.0/bin/linux/amd64/kubectl && \ - chmod +x kubectl && sudo mv kubectl /usr/local/bin/ + curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/v1.13.3/bin/linux/amd64/kubectl \ + && chmod +x kubectl && sudo mv kubectl /usr/local/bin/ fi -mkdir -p ${HOME}/.kube -touch ${HOME}/.kube/config -export KUBECONFIG=${HOME}/.kube/config +export KUBECONFIG="$(mktemp)" +echo -n ${KUBECONFIG} > /tmp/kubeconfig echo "starting Kubernetes cluster..." K8S_VERSION=v1.11 @@ -54,3 +53,6 @@ make -C ${DIR}/../../ build container echo "copying docker image to cluster..." DEV_IMAGE=${REGISTRY}/nginx-ingress-controller:${TAG} ${DIR}/dind-cluster-v1.11.sh copy-image ${DEV_IMAGE} + +echo "copying e2e docker image to cluster..." +${DIR}/dind-cluster-v1.11.sh copy-image nginx-ingress-controller:e2e diff --git a/test/e2e/wait-for-nginx.sh b/test/e2e/wait-for-nginx.sh index 360a7986da..f7b3ad9dbd 100755 --- a/test/e2e/wait-for-nginx.sh +++ b/test/e2e/wait-for-nginx.sh @@ -15,6 +15,7 @@ # limitations under the License. 
set -e +set -x DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" @@ -32,8 +33,10 @@ function on_exit { } trap on_exit EXIT -sed "s@\${NAMESPACE}@${NAMESPACE}@" $DIR/../manifests/ingress-controller/mandatory.yaml | kubectl apply --namespace=$NAMESPACE -f - -cat $DIR/../manifests/ingress-controller/service-nodeport.yaml | kubectl apply --namespace=$NAMESPACE -f - +kubectl apply --namespace=$NAMESPACE -f $DIR/manifests/service.yaml + +sed "s@\${NAMESPACE}@${NAMESPACE}@" $DIR/manifests/mandatory.yaml | kubectl apply --namespace=$NAMESPACE -f - +cat $DIR/manifests/service.yaml | kubectl apply --namespace=$NAMESPACE -f - # wait for the deployment and fail if there is an error before starting the execution of any test kubectl rollout status \ diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml index 9d91c6339f..b13a50ed1f 100644 --- a/vendor/github.com/imdario/mergo/.travis.yml +++ b/vendor/github.com/imdario/mergo/.travis.yml @@ -1,2 +1,7 @@ language: go -install: go get -t +install: + - go get -t + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls +script: + - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md index b131069799..02fc81e062 100644 --- a/vendor/github.com/imdario/mergo/README.md +++ b/vendor/github.com/imdario/mergo/README.md @@ -2,17 +2,18 @@ A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. -Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region Marche. 
- -![Mergo dall'alto](http://www.comune.mergo.an.it/Siti/Mergo/Immagini/Foto/mergo_dall_alto.jpg) +Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. ## Status -It is ready for production use. It works fine after extensive use in the wild. +It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). -[![Build Status][1]][2] [![GoDoc][3]][4] [![GoCard][5]][6] +[![Build Status][1]][2] +[![Coverage Status][7]][8] +[![Sourcegraph][9]][10] +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield) [1]: https://travis-ci.org/imdario/mergo.png [2]: https://travis-ci.org/imdario/mergo @@ -20,19 +21,54 @@ It is ready for production use. It works fine after extensive use in the wild. [4]: https://godoc.org/github.com/imdario/mergo [5]: https://goreportcard.com/badge/imdario/mergo [6]: https://goreportcard.com/report/github.com/imdario/mergo +[7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master +[8]: https://coveralls.io/github/imdario/mergo?branch=master +[9]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg +[10]: https://sourcegraph.com/github.com/imdario/mergo?badge + +### Latest release + +[Release v0.3.7](https://github.com/imdario/mergo/releases/tag/v0.3.7). ### Important note -Mergo is intended to assign **only** zero value fields on destination with source value. Since April 6th it works like this. Before it didn't work properly, causing some random overwrites. After some issues and PRs I found it didn't merge as I designed it. 
Thanks to [imdario/mergo#8](https://github.com/imdario/mergo/pull/8) overwriting functions were added and the wrong behavior was clearly detected. +Please keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2) Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code. If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0). +### Donations + +If Mergo is useful to you, consider buying me a coffee, a beer or making a monthly donation so I can keep building great free software. :heart_eyes: + +Buy Me a Coffee at ko-fi.com +[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo) +[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo) +Donate using Liberapay + ### Mergo in the wild -- [docker/docker](https://github.com/docker/docker/) +- [moby/moby](https://github.com/moby/moby) - [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) +- [vmware/dispatch](https://github.com/vmware/dispatch) +- [Shopify/themekit](https://github.com/Shopify/themekit) - [imdario/zas](https://github.com/imdario/zas) +- [matcornic/hermes](https://github.com/matcornic/hermes) +- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go) +- [kataras/iris](https://github.com/kataras/iris) +- [michaelsauter/crane](https://github.com/michaelsauter/crane) +- [go-task/task](https://github.com/go-task/task) +- [sensu/uchiwa](https://github.com/sensu/uchiwa) +- 
[ory/hydra](https://github.com/ory/hydra) +- [sisatech/vcli](https://github.com/sisatech/vcli) +- [dairycart/dairycart](https://github.com/dairycart/dairycart) +- [projectcalico/felix](https://github.com/projectcalico/felix) +- [resin-os/balena](https://github.com/resin-os/balena) +- [go-kivik/kivik](https://github.com/go-kivik/kivik) +- [Telefonica/govice](https://github.com/Telefonica/govice) +- [supergiant/supergiant](supergiant/supergiant) +- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce) - [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy) +- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel) - [EagerIO/Stout](https://github.com/EagerIO/Stout) - [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api) - [russross/canvasassignments](https://github.com/russross/canvasassignments) @@ -50,7 +86,7 @@ If you were using Mergo **before** April 6th 2015, please check your project wor - [thoas/picfit](https://github.com/thoas/picfit) - [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) - [jnuthong/item_search](https://github.com/jnuthong/item_search) -- [Iris Web Framework](https://github.com/kataras/iris) +- [bukalapak/snowboard](https://github.com/bukalapak/snowboard) ## Installation @@ -63,7 +99,7 @@ If you were using Mergo **before** April 6th 2015, please check your project wor ## Usage -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). +You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. 
Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are not considered zero values](https://golang.org/ref/spec#The_zero_value) either. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). ```go if err := mergo.Merge(&dst, src); err != nil { @@ -71,15 +107,15 @@ if err := mergo.Merge(&dst, src); err != nil { } ``` -Also, you can merge overwriting values using MergeWithOverwrite. +Also, you can merge overwriting values using the transformer `WithOverride`. ```go -if err := mergo.MergeWithOverwrite(&dst, src); err != nil { +if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { // ... } ``` -Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field. +Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field. ```go if err := mergo.Map(&dst, srcMap); err != nil { @@ -87,7 +123,7 @@ if err := mergo.Map(&dst, srcMap); err != nil { } ``` -Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values. +Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values. More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo). 
@@ -111,13 +147,10 @@ func main() { A: "one", B: 2, } - dest := Foo{ A: "two", } - mergo.Merge(&dest, src) - fmt.Println(dest) // Will print // {two 2} @@ -126,7 +159,56 @@ func main() { Note: if test are failing due missing package, please execute: - go get gopkg.in/yaml.v1 + go get gopkg.in/yaml.v2 + +### Transformers + +Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`? + +```go +package main + +import ( + "fmt" + "github.com/imdario/mergo" + "reflect" + "time" +) + +type timeTransfomer struct { +} + +func (t timeTransfomer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { + if typ == reflect.TypeOf(time.Time{}) { + return func(dst, src reflect.Value) error { + if dst.CanSet() { + isZero := dst.MethodByName("IsZero") + result := isZero.Call([]reflect.Value{}) + if result[0].Bool() { + dst.Set(src) + } + } + return nil + } + } + return nil +} + +type Snapshot struct { + Time time.Time + // ... +} + +func main() { + src := Snapshot{time.Now()} + dest := Snapshot{} + mergo.Merge(&dest, src, mergo.WithTransformers(timeTransfomer{})) + fmt.Println(dest) + // Will print + // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } +} +``` + ## Contact me @@ -136,6 +218,21 @@ If I can help you, you have an idea or you are using Mergo in your projects, don Written by [Dario Castañé](http://dario.im). 
+## Top Contributors + +[![0](https://sourcerer.io/fame/imdario/imdario/mergo/images/0)](https://sourcerer.io/fame/imdario/imdario/mergo/links/0) +[![1](https://sourcerer.io/fame/imdario/imdario/mergo/images/1)](https://sourcerer.io/fame/imdario/imdario/mergo/links/1) +[![2](https://sourcerer.io/fame/imdario/imdario/mergo/images/2)](https://sourcerer.io/fame/imdario/imdario/mergo/links/2) +[![3](https://sourcerer.io/fame/imdario/imdario/mergo/images/3)](https://sourcerer.io/fame/imdario/imdario/mergo/links/3) +[![4](https://sourcerer.io/fame/imdario/imdario/mergo/images/4)](https://sourcerer.io/fame/imdario/imdario/mergo/links/4) +[![5](https://sourcerer.io/fame/imdario/imdario/mergo/images/5)](https://sourcerer.io/fame/imdario/imdario/mergo/links/5) +[![6](https://sourcerer.io/fame/imdario/imdario/mergo/images/6)](https://sourcerer.io/fame/imdario/imdario/mergo/links/6) +[![7](https://sourcerer.io/fame/imdario/imdario/mergo/images/7)](https://sourcerer.io/fame/imdario/imdario/mergo/links/7) + + ## License [BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). + + +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large) diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go index 99002565f2..3f5afa83a1 100644 --- a/vendor/github.com/imdario/mergo/map.go +++ b/vendor/github.com/imdario/mergo/map.go @@ -31,7 +31,8 @@ func isExported(field reflect.StructField) bool { // Traverses recursively both values, assigning src's fields values to dst. // The map argument tracks comparisons that have already been seen, which allows // short circuiting on recursive types. 
-func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, overwrite bool) (err error) { +func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { + overwrite := config.Overwrite if dst.CanAddr() { addr := dst.UnsafeAddr() h := 17 * addr @@ -71,6 +72,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, over case reflect.Struct: srcMap := src.Interface().(map[string]interface{}) for key := range srcMap { + config.overwriteWithEmptyValue = true srcValue := srcMap[key] fieldName := changeInitialCase(key, unicode.ToUpper) dstElement := dst.FieldByName(fieldName) @@ -97,15 +99,15 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, over continue } if srcKind == dstKind { - if err = deepMerge(dstElement, srcElement, visited, depth+1, overwrite); err != nil { + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { return } } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface { - if err = deepMerge(dstElement, srcElement, visited, depth+1, overwrite); err != nil { + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { return } } else if srcKind == reflect.Map { - if err = deepMap(dstElement, srcElement, visited, depth+1, overwrite); err != nil { + if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil { return } } else { @@ -127,28 +129,35 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, over // doesn't apply if dst is a map. // This is separated method from Merge because it is cleaner and it keeps sane // semantics: merging equal types, mapping different (restricted) types. -func Map(dst, src interface{}) error { - return _map(dst, src, false) +func Map(dst, src interface{}, opts ...func(*Config)) error { + return _map(dst, src, opts...) 
} -// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overriden by +// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by // non-empty src attribute values. -func MapWithOverwrite(dst, src interface{}) error { - return _map(dst, src, true) +// Deprecated: Use Map(…) with WithOverride +func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { + return _map(dst, src, append(opts, WithOverride)...) } -func _map(dst, src interface{}, overwrite bool) error { +func _map(dst, src interface{}, opts ...func(*Config)) error { var ( vDst, vSrc reflect.Value err error ) + config := &Config{} + + for _, opt := range opts { + opt(config) + } + if vDst, vSrc, err = resolveValues(dst, src); err != nil { return err } // To be friction-less, we redirect equal-type arguments // to deepMerge. Only because arguments can be anything. if vSrc.Kind() == vDst.Kind() { - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite) + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) } switch vSrc.Kind() { case reflect.Struct: @@ -162,5 +171,5 @@ func _map(dst, src interface{}, overwrite bool) error { default: return ErrNotSupported } - return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite) + return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config) } diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go index 052b9fe782..f8de6c5430 100644 --- a/vendor/github.com/imdario/mergo/merge.go +++ b/vendor/github.com/imdario/mergo/merge.go @@ -9,13 +9,14 @@ package mergo import ( + "fmt" "reflect" ) func hasExportedField(dst reflect.Value) (exported bool) { for i, n := 0, dst.NumField(); i < n; i++ { field := dst.Type().Field(i) - if field.Anonymous { + if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { exported = exported || hasExportedField(dst.Field(i)) } else { 
exported = exported || len(field.PkgPath) == 0 @@ -24,10 +25,25 @@ func hasExportedField(dst reflect.Value) (exported bool) { return } +type Config struct { + Overwrite bool + AppendSlice bool + Transformers Transformers + overwriteWithEmptyValue bool +} + +type Transformers interface { + Transformer(reflect.Type) func(dst, src reflect.Value) error +} + // Traverses recursively both values, assigning src's fields values to dst. // The map argument tracks comparisons that have already been seen, which allows // short circuiting on recursive types. -func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, overwrite bool) (err error) { +func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { + overwrite := config.Overwrite + overwriteWithEmptySrc := config.overwriteWithEmptyValue + config.overwriteWithEmptyValue = false + if !src.IsValid() { return } @@ -44,23 +60,30 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, ov // Remember, remember... 
visited[h] = &visit{addr, typ, seen} } + + if config.Transformers != nil && !isEmptyValue(dst) { + if fn := config.Transformers.Transformer(dst.Type()); fn != nil { + err = fn(dst, src) + return + } + } + switch dst.Kind() { case reflect.Struct: if hasExportedField(dst) { for i, n := 0, dst.NumField(); i < n; i++ { - if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, overwrite); err != nil { + if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { return } } } else { - if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) { + if dst.CanSet() && (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) { dst.Set(src) } } case reflect.Map: - if len(src.MapKeys()) == 0 && !src.IsNil() && len(dst.MapKeys()) == 0 { + if dst.IsNil() && !src.IsNil() { dst.Set(reflect.MakeMap(dst.Type())) - return } for _, key := range src.MapKeys() { srcElement := src.MapIndex(key) @@ -69,7 +92,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, ov } dstElement := dst.MapIndex(key) switch srcElement.Kind() { - case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Slice: + case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: if srcElement.IsNil() { continue } @@ -84,36 +107,78 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, ov case reflect.Ptr: fallthrough case reflect.Map: - if err = deepMerge(dstElement, srcElement, visited, depth+1, overwrite); err != nil { + srcMapElm := srcElement + dstMapElm := dstElement + if srcMapElm.CanInterface() { + srcMapElm = reflect.ValueOf(srcMapElm.Interface()) + if dstMapElm.IsValid() { + dstMapElm = reflect.ValueOf(dstMapElm.Interface()) + } + } + if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil { return } + case reflect.Slice: + srcSlice := reflect.ValueOf(srcElement.Interface()) + + var dstSlice reflect.Value + if 
!dstElement.IsValid() || dstElement.IsNil() { + dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len()) + } else { + dstSlice = reflect.ValueOf(dstElement.Interface()) + } + + if (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { + dstSlice = srcSlice + } else if config.AppendSlice { + if srcSlice.Type() != dstSlice.Type() { + return fmt.Errorf("cannot append two slice with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) + } + dstSlice = reflect.AppendSlice(dstSlice, srcSlice) + } + dst.SetMapIndex(key, dstSlice) } } - if dstElement.IsValid() && reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map { + if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) { continue } - if !isEmptyValue(srcElement) && (overwrite || (!dstElement.IsValid() || isEmptyValue(dst))) { + if srcElement.IsValid() && (overwrite || (!dstElement.IsValid() || isEmptyValue(dstElement))) { if dst.IsNil() { dst.Set(reflect.MakeMap(dst.Type())) } dst.SetMapIndex(key, srcElement) } } + case reflect.Slice: + if !dst.CanSet() { + break + } + if (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { + dst.Set(src) + } else if config.AppendSlice { + if src.Type() != dst.Type() { + return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) + } + dst.Set(reflect.AppendSlice(dst, src)) + } case reflect.Ptr: fallthrough case reflect.Interface: + if src.IsNil() { + break + } if src.Kind() != reflect.Interface { if dst.IsNil() || overwrite { if dst.CanSet() && (overwrite || isEmptyValue(dst)) { dst.Set(src) } } else if src.Kind() == reflect.Ptr { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, overwrite); err != nil { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err 
!= nil { return } } else if dst.Elem().Type() == src.Type() { - if err = deepMerge(dst.Elem(), src, visited, depth+1, overwrite); err != nil { + if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { return } } else { @@ -121,17 +186,15 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, ov } break } - if src.IsNil() { - break - } else if dst.IsNil() || overwrite { + if dst.IsNil() || overwrite { if dst.CanSet() && (overwrite || isEmptyValue(dst)) { dst.Set(src) } - } else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, overwrite); err != nil { + } else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { return } default: - if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) { + if dst.CanSet() && (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) { dst.Set(src) } } @@ -142,26 +205,51 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, ov // src attributes if they themselves are not empty. dst and src must be valid same-type structs // and dst must be a pointer to struct. // It won't merge unexported (private) fields and will do recursively any exported field. -func Merge(dst, src interface{}) error { - return merge(dst, src, false) +func Merge(dst, src interface{}, opts ...func(*Config)) error { + return merge(dst, src, opts...) } // MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overriden by // non-empty src attribute values. -func MergeWithOverwrite(dst, src interface{}) error { - return merge(dst, src, true) +// Deprecated: use Merge(…) with WithOverride +func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { + return merge(dst, src, append(opts, WithOverride)...) +} + +// WithTransformers adds transformers to merge, allowing to customize the merging of some types. 
+func WithTransformers(transformers Transformers) func(*Config) { + return func(config *Config) { + config.Transformers = transformers + } +} + +// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values. +func WithOverride(config *Config) { + config.Overwrite = true +} + +// WithAppendSlice will make merge append slices instead of overwriting it +func WithAppendSlice(config *Config) { + config.AppendSlice = true } -func merge(dst, src interface{}, overwrite bool) error { +func merge(dst, src interface{}, opts ...func(*Config)) error { var ( vDst, vSrc reflect.Value err error ) + + config := &Config{} + + for _, opt := range opts { + opt(config) + } + if vDst, vSrc, err = resolveValues(dst, src); err != nil { return err } if vDst.Type() != vSrc.Type() { return ErrDifferentArgumentsTypes } - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite) + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) } diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go index 79ccdf5cb0..a82fea2fdc 100644 --- a/vendor/github.com/imdario/mergo/mergo.go +++ b/vendor/github.com/imdario/mergo/mergo.go @@ -32,7 +32,7 @@ type visit struct { next *visit } -// From src/pkg/encoding/json. +// From src/pkg/encoding/json/encode.go. 
func isEmptyValue(v reflect.Value) bool { switch v.Kind() { case reflect.Array, reflect.Map, reflect.Slice, reflect.String: @@ -45,8 +45,15 @@ func isEmptyValue(v reflect.Value) bool { return v.Uint() == 0 case reflect.Float32, reflect.Float64: return v.Float() == 0 - case reflect.Interface, reflect.Ptr, reflect.Func: + case reflect.Interface, reflect.Ptr: + if v.IsNil() { + return true + } + return isEmptyValue(v.Elem()) + case reflect.Func: return v.IsNil() + case reflect.Invalid: + return true } return false } diff --git a/vendor/github.com/inconshreveable/mousetrap/LICENSE b/vendor/github.com/inconshreveable/mousetrap/LICENSE new file mode 100644 index 0000000000..5f0d1fb6a7 --- /dev/null +++ b/vendor/github.com/inconshreveable/mousetrap/LICENSE @@ -0,0 +1,13 @@ +Copyright 2014 Alan Shreve + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/inconshreveable/mousetrap/README.md new file mode 100644 index 0000000000..7a950d1774 --- /dev/null +++ b/vendor/github.com/inconshreveable/mousetrap/README.md @@ -0,0 +1,23 @@ +# mousetrap + +mousetrap is a tiny library that answers a single question. + +On a Windows machine, was the process invoked by someone double clicking on +the executable file while browsing in explorer? + +### Motivation + +Windows developers unfamiliar with command line tools will often "double-click" +the executable for a tool. 
Because most CLI tools print the help and then exit +when invoked without arguments, this is often very frustrating for those users. + +mousetrap provides a way to detect these invocations so that you can provide +more helpful behavior and instructions on how to run the CLI tool. To see what +this looks like, both from an organizational and a technical perspective, see +https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/ + +### The interface + +The library exposes a single interface: + + func StartedByExplorer() (bool) diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go new file mode 100644 index 0000000000..9d2d8a4bab --- /dev/null +++ b/vendor/github.com/inconshreveable/mousetrap/trap_others.go @@ -0,0 +1,15 @@ +// +build !windows + +package mousetrap + +// StartedByExplorer returns true if the program was invoked by the user +// double-clicking on the executable from explorer.exe +// +// It is conservative and returns false if any of the internal calls fail. +// It does not guarantee that the program was run from a terminal. It only can tell you +// whether it was launched from explorer.exe +// +// On non-Windows platforms, it always returns false. 
+func StartedByExplorer() bool { + return false +} diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go new file mode 100644 index 0000000000..336142a5e3 --- /dev/null +++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go @@ -0,0 +1,98 @@ +// +build windows +// +build !go1.4 + +package mousetrap + +import ( + "fmt" + "os" + "syscall" + "unsafe" +) + +const ( + // defined by the Win32 API + th32cs_snapprocess uintptr = 0x2 +) + +var ( + kernel = syscall.MustLoadDLL("kernel32.dll") + CreateToolhelp32Snapshot = kernel.MustFindProc("CreateToolhelp32Snapshot") + Process32First = kernel.MustFindProc("Process32FirstW") + Process32Next = kernel.MustFindProc("Process32NextW") +) + +// ProcessEntry32 structure defined by the Win32 API +type processEntry32 struct { + dwSize uint32 + cntUsage uint32 + th32ProcessID uint32 + th32DefaultHeapID int + th32ModuleID uint32 + cntThreads uint32 + th32ParentProcessID uint32 + pcPriClassBase int32 + dwFlags uint32 + szExeFile [syscall.MAX_PATH]uint16 +} + +func getProcessEntry(pid int) (pe *processEntry32, err error) { + snapshot, _, e1 := CreateToolhelp32Snapshot.Call(th32cs_snapprocess, uintptr(0)) + if snapshot == uintptr(syscall.InvalidHandle) { + err = fmt.Errorf("CreateToolhelp32Snapshot: %v", e1) + return + } + defer syscall.CloseHandle(syscall.Handle(snapshot)) + + var processEntry processEntry32 + processEntry.dwSize = uint32(unsafe.Sizeof(processEntry)) + ok, _, e1 := Process32First.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) + if ok == 0 { + err = fmt.Errorf("Process32First: %v", e1) + return + } + + for { + if processEntry.th32ProcessID == uint32(pid) { + pe = &processEntry + return + } + + ok, _, e1 = Process32Next.Call(snapshot, uintptr(unsafe.Pointer(&processEntry))) + if ok == 0 { + err = fmt.Errorf("Process32Next: %v", e1) + return + } + } +} + +func getppid() (pid int, err error) { + pe, err 
:= getProcessEntry(os.Getpid()) + if err != nil { + return + } + + pid = int(pe.th32ParentProcessID) + return +} + +// StartedByExplorer returns true if the program was invoked by the user double-clicking +// on the executable from explorer.exe +// +// It is conservative and returns false if any of the internal calls fail. +// It does not guarantee that the program was run from a terminal. It only can tell you +// whether it was launched from explorer.exe +func StartedByExplorer() bool { + ppid, err := getppid() + if err != nil { + return false + } + + pe, err := getProcessEntry(ppid) + if err != nil { + return false + } + + name := syscall.UTF16ToString(pe.szExeFile[:]) + return name == "explorer.exe" +} diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go new file mode 100644 index 0000000000..9a28e57c3c --- /dev/null +++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go @@ -0,0 +1,46 @@ +// +build windows +// +build go1.4 + +package mousetrap + +import ( + "os" + "syscall" + "unsafe" +) + +func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) { + snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0) + if err != nil { + return nil, err + } + defer syscall.CloseHandle(snapshot) + var procEntry syscall.ProcessEntry32 + procEntry.Size = uint32(unsafe.Sizeof(procEntry)) + if err = syscall.Process32First(snapshot, &procEntry); err != nil { + return nil, err + } + for { + if procEntry.ProcessID == uint32(pid) { + return &procEntry, nil + } + err = syscall.Process32Next(snapshot, &procEntry) + if err != nil { + return nil, err + } + } +} + +// StartedByExplorer returns true if the program was invoked by the user double-clicking +// on the executable from explorer.exe +// +// It is conservative and returns false if any of the internal calls fail. 
+// It does not guarantee that the program was run from a terminal. It only can tell you +// whether it was launched from explorer.exe +func StartedByExplorer() bool { + pe, err := getProcessEntry(os.Getppid()) + if err != nil { + return false + } + return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:]) +} diff --git a/vendor/github.com/spf13/cobra/.gitignore b/vendor/github.com/spf13/cobra/.gitignore new file mode 100644 index 0000000000..1b8c7c2611 --- /dev/null +++ b/vendor/github.com/spf13/cobra/.gitignore @@ -0,0 +1,36 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +# Vim files https://github.com/github/gitignore/blob/master/Global/Vim.gitignore +# swap +[._]*.s[a-w][a-z] +[._]s[a-w][a-z] +# session +Session.vim +# temporary +.netrwhist +*~ +# auto-generated tag files +tags + +*.exe + +cobra.test diff --git a/vendor/github.com/spf13/cobra/.mailmap b/vendor/github.com/spf13/cobra/.mailmap new file mode 100644 index 0000000000..94ec53068a --- /dev/null +++ b/vendor/github.com/spf13/cobra/.mailmap @@ -0,0 +1,3 @@ +Steve Francia +Bjørn Erik Pedersen +Fabiano Franz diff --git a/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml new file mode 100644 index 0000000000..5afcb20961 --- /dev/null +++ b/vendor/github.com/spf13/cobra/.travis.yml @@ -0,0 +1,21 @@ +language: go + +matrix: + include: + - go: 1.9.4 + - go: 1.10.0 + - go: tip + allow_failures: + - go: tip + +before_install: + - mkdir -p bin + - curl -Lso bin/shellcheck https://github.com/caarlos0/shellcheck-docker/releases/download/v0.4.3/shellcheck + - chmod +x bin/shellcheck +script: + - PATH=$PATH:$PWD/bin go test -v ./... 
+ - go build + - diff -u <(echo -n) <(gofmt -d -s .) + - if [ -z $NOVET ]; then + diff -u <(echo -n) <(go tool vet . 2>&1 | grep -vE 'ExampleCommand|bash_completions.*Fprint'); + fi diff --git a/vendor/github.com/spf13/cobra/LICENSE.txt b/vendor/github.com/spf13/cobra/LICENSE.txt new file mode 100644 index 0000000000..298f0e2665 --- /dev/null +++ b/vendor/github.com/spf13/cobra/LICENSE.txt @@ -0,0 +1,174 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md new file mode 100644 index 0000000000..851fcc087c --- /dev/null +++ b/vendor/github.com/spf13/cobra/README.md @@ -0,0 +1,736 @@ +![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png) + +Cobra is both a library for creating powerful modern CLI applications as well as a program to generate applications and command files. 
+ +Many of the most widely used Go projects are built using Cobra including: + +* [Kubernetes](http://kubernetes.io/) +* [Hugo](http://gohugo.io) +* [rkt](https://github.com/coreos/rkt) +* [etcd](https://github.com/coreos/etcd) +* [Moby (former Docker)](https://github.com/moby/moby) +* [Docker (distribution)](https://github.com/docker/distribution) +* [OpenShift](https://www.openshift.com/) +* [Delve](https://github.com/derekparker/delve) +* [GopherJS](http://www.gopherjs.org/) +* [CockroachDB](http://www.cockroachlabs.com/) +* [Bleve](http://www.blevesearch.com/) +* [ProjectAtomic (enterprise)](http://www.projectatomic.io/) +* [GiantSwarm's swarm](https://github.com/giantswarm/cli) +* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) +* [rclone](http://rclone.org/) +* [nehm](https://github.com/bogem/nehm) +* [Pouch](https://github.com/alibaba/pouch) + +[![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra) +[![CircleCI status](https://circleci.com/gh/spf13/cobra.png?circle-token=:circle-token "CircleCI status")](https://circleci.com/gh/spf13/cobra) +[![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra) + +# Table of Contents + +- [Overview](#overview) +- [Concepts](#concepts) + * [Commands](#commands) + * [Flags](#flags) +- [Installing](#installing) +- [Getting Started](#getting-started) + * [Using the Cobra Generator](#using-the-cobra-generator) + * [Using the Cobra Library](#using-the-cobra-library) + * [Working with Flags](#working-with-flags) + * [Positional and Custom Arguments](#positional-and-custom-arguments) + * [Example](#example) + * [Help Command](#help-command) + * [Usage Message](#usage-message) + * [PreRun and PostRun Hooks](#prerun-and-postrun-hooks) + * [Suggestions when "unknown command" 
happens](#suggestions-when-unknown-command-happens) + * [Generating documentation for your command](#generating-documentation-for-your-command) + * [Generating bash completions](#generating-bash-completions) +- [Contributing](#contributing) +- [License](#license) + +# Overview + +Cobra is a library providing a simple interface to create powerful modern CLI +interfaces similar to git & go tools. + +Cobra is also an application that will generate your application scaffolding to rapidly +develop a Cobra-based application. + +Cobra provides: +* Easy subcommand-based CLIs: `app server`, `app fetch`, etc. +* Fully POSIX-compliant flags (including short & long versions) +* Nested subcommands +* Global, local and cascading flags +* Easy generation of applications & commands with `cobra init appname` & `cobra add cmdname` +* Intelligent suggestions (`app srver`... did you mean `app server`?) +* Automatic help generation for commands and flags +* Automatic help flag recognition of `-h`, `--help`, etc. +* Automatically generated bash autocomplete for your application +* Automatically generated man pages for your application +* Command aliases so you can change things without breaking them +* The flexibility to define your own help, usage, etc. +* Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps + +# Concepts + +Cobra is built on a structure of commands, arguments & flags. + +**Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions. + +The best applications will read like sentences when used. Users will know how +to use the application because they will natively understand how to use it. + +The pattern to follow is +`APPNAME VERB NOUN --ADJECTIVE.` + or +`APPNAME COMMAND ARG --FLAG` + +A few good real world examples may better illustrate this point. 
+ +In the following example, 'server' is a command, and 'port' is a flag: + + hugo server --port=1313 + +In this command we are telling Git to clone the url bare. + + git clone URL --bare + +## Commands + +Command is the central point of the application. Each interaction that +the application supports will be contained in a Command. A command can +have children commands and optionally run an action. + +In the example above, 'server' is the command. + +[More about cobra.Command](https://godoc.org/github.com/spf13/cobra#Command) + +## Flags + +A flag is a way to modify the behavior of a command. Cobra supports +fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/). +A Cobra command can define flags that persist through to children commands +and flags that are only available to that command. + +In the example above, 'port' is the flag. + +Flag functionality is provided by the [pflag +library](https://github.com/spf13/pflag), a fork of the flag standard library +which maintains the same interface while adding POSIX compliance. + +# Installing +Using Cobra is easy. First, use `go get` to install the latest version +of the library. This command will install the `cobra` generator executable +along with the library and its dependencies: + + go get -u github.com/spf13/cobra/cobra + +Next, include Cobra in your application: + +```go +import "github.com/spf13/cobra" +``` + +# Getting Started + +While you are welcome to provide your own organization, typically a Cobra-based +application will follow the following organizational structure: + +``` + ▾ appName/ + ▾ cmd/ + add.go + your.go + commands.go + here.go + main.go +``` + +In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra. 
+ +```go +package main + +import ( + "fmt" + "os" + + "{pathToYourApp}/cmd" +) + +func main() { + cmd.Execute() +} +``` + +## Using the Cobra Generator + +Cobra provides its own program that will create your application and add any +commands you want. It's the easiest way to incorporate Cobra into your application. + +[Here](https://github.com/spf13/cobra/blob/master/cobra/README.md) you can find more information about it. + +## Using the Cobra Library + +To manually implement Cobra you need to create a bare main.go file and a rootCmd file. +You will optionally provide additional commands as you see fit. + +### Create rootCmd + +Cobra doesn't require any special constructors. Simply create your commands. + +Ideally you place this in app/cmd/root.go: + +```go +var rootCmd = &cobra.Command{ + Use: "hugo", + Short: "Hugo is a very fast static site generator", + Long: `A Fast and Flexible Static Site Generator built with + love by spf13 and friends in Go. + Complete documentation is available at http://hugo.spf13.com`, + Run: func(cmd *cobra.Command, args []string) { + // Do Stuff Here + }, +} + +func Execute() { + if err := rootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(1) + } +} +``` + +You will additionally define flags and handle configuration in your init() function. + +For example cmd/root.go: + +```go +import ( + "fmt" + "os" + + homedir "github.com/mitchellh/go-homedir" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +func init() { + cobra.OnInitialize(initConfig) + rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") + rootCmd.PersistentFlags().StringVarP(&projectBase, "projectbase", "b", "", "base project directory eg. 
github.com/spf13/") + rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "Author name for copyright attribution") + rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "Name of license for the project (can provide `licensetext` in config)") + rootCmd.PersistentFlags().Bool("viper", true, "Use Viper for configuration") + viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) + viper.BindPFlag("projectbase", rootCmd.PersistentFlags().Lookup("projectbase")) + viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper")) + viper.SetDefault("author", "NAME HERE ") + viper.SetDefault("license", "apache") +} + +func initConfig() { + // Don't forget to read config either from cfgFile or from home directory! + if cfgFile != "" { + // Use config file from the flag. + viper.SetConfigFile(cfgFile) + } else { + // Find home directory. + home, err := homedir.Dir() + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + // Search config in home directory with name ".cobra" (without extension). + viper.AddConfigPath(home) + viper.SetConfigName(".cobra") + } + + if err := viper.ReadInConfig(); err != nil { + fmt.Println("Can't read config:", err) + os.Exit(1) + } +} +``` + +### Create your main.go + +With the root command you need to have your main function execute it. +Execute should be run on the root for clarity, though it can be called on any command. + +In a Cobra app, typically the main.go file is very bare. It serves, one purpose, to initialize Cobra. + +```go +package main + +import ( + "fmt" + "os" + + "{pathToYourApp}/cmd" +) + +func main() { + cmd.Execute() +} +``` + +### Create additional commands + +Additional commands can be defined and typically are each given their own file +inside of the cmd/ directory. 
+ +If you wanted to create a version command you would create cmd/version.go and +populate it with the following: + +```go +package cmd + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func init() { + rootCmd.AddCommand(versionCmd) +} + +var versionCmd = &cobra.Command{ + Use: "version", + Short: "Print the version number of Hugo", + Long: `All software has versions. This is Hugo's`, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") + }, +} +``` + +## Working with Flags + +Flags provide modifiers to control how the action command operates. + +### Assign flags to a command + +Since the flags are defined and used in different locations, we need to +define a variable outside with the correct scope to assign the flag to +work with. + +```go +var Verbose bool +var Source string +``` + +There are two different approaches to assign a flag. + +### Persistent Flags + +A flag can be 'persistent' meaning that this flag will be available to the +command it's assigned to as well as every command under that command. For +global flags, assign a flag as a persistent flag on the root. + +```go +rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") +``` + +### Local Flags + +A flag can also be assigned locally which will only apply to that specific command. + +```go +rootCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") +``` + +### Local Flag on Parent Commands + +By default Cobra only parses local flags on the target command, any local flags on +parent commands are ignored. By enabling `Command.TraverseChildren` Cobra will +parse local flags on each command before executing the target command. 
+ +```go +command := cobra.Command{ + Use: "print [OPTIONS] [COMMANDS]", + TraverseChildren: true, +} +``` + +### Bind Flags with Config + +You can also bind your flags with [viper](https://github.com/spf13/viper): +```go +var author string + +func init() { + rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution") + viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) +} +``` + +In this example the persistent flag `author` is bound with `viper`. +**Note**, that the variable `author` will not be set to the value from config, +when the `--author` flag is not provided by user. + +More in [viper documentation](https://github.com/spf13/viper#working-with-flags). + +### Required flags + +Flags are optional by default. If instead you wish your command to report an error +when a flag has not been set, mark it as required: +```go +rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)") +rootCmd.MarkFlagRequired("region") +``` + +## Positional and Custom Arguments + +Validation of positional arguments can be specified using the `Args` field +of `Command`. + +The following validators are built in: + +- `NoArgs` - the command will report an error if there are any positional args. +- `ArbitraryArgs` - the command will accept any args. +- `OnlyValidArgs` - the command will report an error if there are any positional args that are not in the `ValidArgs` field of `Command`. +- `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args. +- `MaximumNArgs(int)` - the command will report an error if there are more than N positional args. +- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args. +- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args. 
+ +An example of setting the custom validator: + +```go +var cmd = &cobra.Command{ + Short: "hello", + Args: func(cmd *cobra.Command, args []string) error { + if len(args) < 1 { + return errors.New("requires at least one arg") + } + if myapp.IsValidColor(args[0]) { + return nil + } + return fmt.Errorf("invalid color specified: %s", args[0]) + }, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Hello, World!") + }, +} +``` + +## Example + +In the example below, we have defined three commands. Two are at the top level +and one (cmdTimes) is a child of one of the top commands. In this case the root +is not executable meaning that a subcommand is required. This is accomplished +by not providing a 'Run' for the 'rootCmd'. + +We have only defined one flag for a single command. + +More documentation about flags is available at https://github.com/spf13/pflag + +```go +package main + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" +) + +func main() { + var echoTimes int + + var cmdPrint = &cobra.Command{ + Use: "print [string to print]", + Short: "Print anything to the screen", + Long: `print is for printing anything back to the screen. +For many years people have printed back to the screen.`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Print: " + strings.Join(args, " ")) + }, + } + + var cmdEcho = &cobra.Command{ + Use: "echo [string to echo]", + Short: "Echo anything to the screen", + Long: `echo is for echoing anything back. 
+Echo works a lot like print, except it has a child command.`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Print: " + strings.Join(args, " ")) + }, + } + + var cmdTimes = &cobra.Command{ + Use: "times [# times] [string to echo]", + Short: "Echo anything to the screen more times", + Long: `echo things multiple times back to the user by providing +a count and a string.`, + Args: cobra.MinimumNArgs(1), + Run: func(cmd *cobra.Command, args []string) { + for i := 0; i < echoTimes; i++ { + fmt.Println("Echo: " + strings.Join(args, " ")) + } + }, + } + + cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") + + var rootCmd = &cobra.Command{Use: "app"} + rootCmd.AddCommand(cmdPrint, cmdEcho) + cmdEcho.AddCommand(cmdTimes) + rootCmd.Execute() +} +``` + +For a more complete example of a larger application, please checkout [Hugo](http://gohugo.io/). + +## Help Command + +Cobra automatically adds a help command to your application when you have subcommands. +This will be called when a user runs 'app help'. Additionally, help will also +support all other commands as input. Say, for instance, you have a command called +'create' without any additional configuration; Cobra will work when 'app help +create' is called. Every command will automatically have the '--help' flag added. + +### Example + +The following output is automatically generated by Cobra. Nothing beyond the +command and flag definitions are needed. + + $ cobra help + + Cobra is a CLI library for Go that empowers applications. + This application is a tool to generate the needed files + to quickly create a Cobra application. 
+ + Usage: + cobra [command] + + Available Commands: + add Add a command to a Cobra Application + help Help about any command + init Initialize a Cobra Application + + Flags: + -a, --author string author name for copyright attribution (default "YOUR NAME") + --config string config file (default is $HOME/.cobra.yaml) + -h, --help help for cobra + -l, --license string name of license for the project + --viper use Viper for configuration (default true) + + Use "cobra [command] --help" for more information about a command. + + +Help is just a command like any other. There is no special logic or behavior +around it. In fact, you can provide your own if you want. + +### Defining your own help + +You can provide your own Help command or your own template for the default command to use +with following functions: + +```go +cmd.SetHelpCommand(cmd *Command) +cmd.SetHelpFunc(f func(*Command, []string)) +cmd.SetHelpTemplate(s string) +``` + +The latter two will also apply to any children commands. + +## Usage Message + +When the user provides an invalid flag or invalid command, Cobra responds by +showing the user the 'usage'. + +### Example +You may recognize this from the help above. That's because the default help +embeds the usage as part of its output. + + $ cobra --invalid + Error: unknown flag: --invalid + Usage: + cobra [command] + + Available Commands: + add Add a command to a Cobra Application + help Help about any command + init Initialize a Cobra Application + + Flags: + -a, --author string author name for copyright attribution (default "YOUR NAME") + --config string config file (default is $HOME/.cobra.yaml) + -h, --help help for cobra + -l, --license string name of license for the project + --viper use Viper for configuration (default true) + + Use "cobra [command] --help" for more information about a command. + +### Defining your own usage +You can provide your own usage function or template for Cobra to use. 
+Like help, the function and template are overridable through public methods: + +```go +cmd.SetUsageFunc(f func(*Command) error) +cmd.SetUsageTemplate(s string) +``` + +## Version Flag + +Cobra adds a top-level '--version' flag if the Version field is set on the root command. +Running an application with the '--version' flag will print the version to stdout using +the version template. The template can be customized using the +`cmd.SetVersionTemplate(s string)` function. + +## PreRun and PostRun Hooks + +It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order: + +- `PersistentPreRun` +- `PreRun` +- `Run` +- `PostRun` +- `PersistentPostRun` + +An example of two commands which use all of these features is below. 
When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`: + +```go +package main + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func main() { + + var rootCmd = &cobra.Command{ + Use: "root [sub]", + Short: "My root command", + PersistentPreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) + }, + PreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PreRun with args: %v\n", args) + }, + Run: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd Run with args: %v\n", args) + }, + PostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PostRun with args: %v\n", args) + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args) + }, + } + + var subCmd = &cobra.Command{ + Use: "sub [no options!]", + Short: "My subcommand", + PreRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PreRun with args: %v\n", args) + }, + Run: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd Run with args: %v\n", args) + }, + PostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PostRun with args: %v\n", args) + }, + PersistentPostRun: func(cmd *cobra.Command, args []string) { + fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args) + }, + } + + rootCmd.AddCommand(subCmd) + + rootCmd.SetArgs([]string{""}) + rootCmd.Execute() + fmt.Println() + rootCmd.SetArgs([]string{"sub", "arg1", "arg2"}) + rootCmd.Execute() +} +``` + +Output: +``` +Inside rootCmd PersistentPreRun with args: [] +Inside rootCmd PreRun with args: [] +Inside rootCmd Run with args: [] +Inside rootCmd PostRun with args: [] +Inside rootCmd PersistentPostRun with args: [] + +Inside rootCmd PersistentPreRun with args: [arg1 arg2] +Inside subCmd PreRun with 
args: [arg1 arg2] +Inside subCmd Run with args: [arg1 arg2] +Inside subCmd PostRun with args: [arg1 arg2] +Inside subCmd PersistentPostRun with args: [arg1 arg2] +``` + +## Suggestions when "unknown command" happens + +Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example: + +``` +$ hugo srever +Error: unknown command "srever" for "hugo" + +Did you mean this? + server + +Run 'hugo --help' for usage. +``` + +Suggestions are automatic based on every subcommand registered and use an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion. + +If you need to disable suggestions or tweak the string distance in your command, use: + +```go +command.DisableSuggestions = true +``` + +or + +```go +command.SuggestionsMinimumDistance = 1 +``` + +You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but makes sense in your set of commands and for some which you don't want aliases. Example: + +``` +$ kubectl remove +Error: unknown command "remove" for "kubectl" + +Did you mean this? + delete + +Run 'kubectl help' for usage. +``` + +## Generating documentation for your command + +Cobra can generate documentation based on subcommands, flags, etc. in the following formats: + +- [Markdown](doc/md_docs.md) +- [ReStructured Text](doc/rest_docs.md) +- [Man Page](doc/man_docs.md) + +## Generating bash completions + +Cobra can generate a bash-completion file. If you add more information to your command, these completions can be amazingly powerful and flexible. Read more about it in [Bash Completions](bash_completions.md). + +# Contributing + +1. Fork it +2. 
Download your fork to your PC (`git clone https://github.com/your_username/cobra && cd cobra`) +3. Create your feature branch (`git checkout -b my-new-feature`) +4. Make changes and add them (`git add .`) +5. Commit your changes (`git commit -m 'Add some feature'`) +6. Push to the branch (`git push origin my-new-feature`) +7. Create new pull request + +# License + +Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt) diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go new file mode 100644 index 0000000000..a5d8a9273e --- /dev/null +++ b/vendor/github.com/spf13/cobra/args.go @@ -0,0 +1,89 @@ +package cobra + +import ( + "fmt" +) + +type PositionalArgs func(cmd *Command, args []string) error + +// Legacy arg validation has the following behaviour: +// - root commands with no subcommands can take arbitrary arguments +// - root commands with subcommands will do subcommand validity checking +// - subcommands will always accept arbitrary arguments +func legacyArgs(cmd *Command, args []string) error { + // no subcommand, always take args + if !cmd.HasSubCommands() { + return nil + } + + // root command with subcommands, do subcommand checking. + if !cmd.HasParent() && len(args) > 0 { + return fmt.Errorf("unknown command %q for %q%s", args[0], cmd.CommandPath(), cmd.findSuggestions(args[0])) + } + return nil +} + +// NoArgs returns an error if any args are included. +func NoArgs(cmd *Command, args []string) error { + if len(args) > 0 { + return fmt.Errorf("unknown command %q for %q", args[0], cmd.CommandPath()) + } + return nil +} + +// OnlyValidArgs returns an error if any args are not in the list of ValidArgs. 
+func OnlyValidArgs(cmd *Command, args []string) error { + if len(cmd.ValidArgs) > 0 { + for _, v := range args { + if !stringInSlice(v, cmd.ValidArgs) { + return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0])) + } + } + } + return nil +} + +// ArbitraryArgs never returns an error. +func ArbitraryArgs(cmd *Command, args []string) error { + return nil +} + +// MinimumNArgs returns an error if there is not at least N args. +func MinimumNArgs(n int) PositionalArgs { + return func(cmd *Command, args []string) error { + if len(args) < n { + return fmt.Errorf("requires at least %d arg(s), only received %d", n, len(args)) + } + return nil + } +} + +// MaximumNArgs returns an error if there are more than N args. +func MaximumNArgs(n int) PositionalArgs { + return func(cmd *Command, args []string) error { + if len(args) > n { + return fmt.Errorf("accepts at most %d arg(s), received %d", n, len(args)) + } + return nil + } +} + +// ExactArgs returns an error if there are not exactly n args. +func ExactArgs(n int) PositionalArgs { + return func(cmd *Command, args []string) error { + if len(args) != n { + return fmt.Errorf("accepts %d arg(s), received %d", n, len(args)) + } + return nil + } +} + +// RangeArgs returns an error if the number of args is not within the expected range. 
+func RangeArgs(min int, max int) PositionalArgs { + return func(cmd *Command, args []string) error { + if len(args) < min || len(args) > max { + return fmt.Errorf("accepts between %d and %d arg(s), received %d", min, max, len(args)) + } + return nil + } +} diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go new file mode 100644 index 0000000000..8fa8f486fa --- /dev/null +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -0,0 +1,584 @@ +package cobra + +import ( + "bytes" + "fmt" + "io" + "os" + "sort" + "strings" + + "github.com/spf13/pflag" +) + +// Annotations for Bash completion. +const ( + BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extensions" + BashCompCustom = "cobra_annotation_bash_completion_custom" + BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag" + BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir" +) + +func writePreamble(buf *bytes.Buffer, name string) { + buf.WriteString(fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name)) + buf.WriteString(fmt.Sprintf(` +__%[1]s_debug() +{ + if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then + echo "$*" >> "${BASH_COMP_DEBUG_FILE}" + fi +} + +# Homebrew on Macs have version 1.3 of bash-completion which doesn't include +# _init_completion. This is a very minimal version of that function. 
+__%[1]s_init_completion() +{ + COMPREPLY=() + _get_comp_words_by_ref "$@" cur prev words cword +} + +__%[1]s_index_of_word() +{ + local w word=$1 + shift + index=0 + for w in "$@"; do + [[ $w = "$word" ]] && return + index=$((index+1)) + done + index=-1 +} + +__%[1]s_contains_word() +{ + local w word=$1; shift + for w in "$@"; do + [[ $w = "$word" ]] && return + done + return 1 +} + +__%[1]s_handle_reply() +{ + __%[1]s_debug "${FUNCNAME[0]}" + case $cur in + -*) + if [[ $(type -t compopt) = "builtin" ]]; then + compopt -o nospace + fi + local allflags + if [ ${#must_have_one_flag[@]} -ne 0 ]; then + allflags=("${must_have_one_flag[@]}") + else + allflags=("${flags[*]} ${two_word_flags[*]}") + fi + COMPREPLY=( $(compgen -W "${allflags[*]}" -- "$cur") ) + if [[ $(type -t compopt) = "builtin" ]]; then + [[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace + fi + + # complete after --flag=abc + if [[ $cur == *=* ]]; then + if [[ $(type -t compopt) = "builtin" ]]; then + compopt +o nospace + fi + + local index flag + flag="${cur%%=*}" + __%[1]s_index_of_word "${flag}" "${flags_with_completion[@]}" + COMPREPLY=() + if [[ ${index} -ge 0 ]]; then + PREFIX="" + cur="${cur#*=}" + ${flags_completion[${index}]} + if [ -n "${ZSH_VERSION}" ]; then + # zsh completion needs --flag= prefix + eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )" + fi + fi + fi + return 0; + ;; + esac + + # check if we are handling a flag with special work handling + local index + __%[1]s_index_of_word "${prev}" "${flags_with_completion[@]}" + if [[ ${index} -ge 0 ]]; then + ${flags_completion[${index}]} + return + fi + + # we are parsing a flag and don't have a special handler, no completion + if [[ ${cur} != "${words[cword]}" ]]; then + return + fi + + local completions + completions=("${commands[@]}") + if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then + completions=("${must_have_one_noun[@]}") + fi + if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then + completions+=("${must_have_one_flag[@]}") + fi + 
COMPREPLY=( $(compgen -W "${completions[*]}" -- "$cur") ) + + if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then + COMPREPLY=( $(compgen -W "${noun_aliases[*]}" -- "$cur") ) + fi + + if [[ ${#COMPREPLY[@]} -eq 0 ]]; then + declare -F __custom_func >/dev/null && __custom_func + fi + + # available in bash-completion >= 2, not always present on macOS + if declare -F __ltrim_colon_completions >/dev/null; then + __ltrim_colon_completions "$cur" + fi + + # If there is only 1 completion and it is a flag with an = it will be completed + # but we don't want a space after the = + if [[ "${#COMPREPLY[@]}" -eq "1" ]] && [[ $(type -t compopt) = "builtin" ]] && [[ "${COMPREPLY[0]}" == --*= ]]; then + compopt -o nospace + fi +} + +# The arguments should be in the form "ext1|ext2|extn" +__%[1]s_handle_filename_extension_flag() +{ + local ext="$1" + _filedir "@(${ext})" +} + +__%[1]s_handle_subdirs_in_dir_flag() +{ + local dir="$1" + pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 +} + +__%[1]s_handle_flag() +{ + __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + + # if a command required a flag, and we found it, unset must_have_one_flag() + local flagname=${words[c]} + local flagvalue + # if the word contained an = + if [[ ${words[c]} == *"="* ]]; then + flagvalue=${flagname#*=} # take in as flagvalue after the = + flagname=${flagname%%=*} # strip everything after the = + flagname="${flagname}=" # but put the = back + fi + __%[1]s_debug "${FUNCNAME[0]}: looking for ${flagname}" + if __%[1]s_contains_word "${flagname}" "${must_have_one_flag[@]}"; then + must_have_one_flag=() + fi + + # if you set a flag which only applies to this command, don't show subcommands + if __%[1]s_contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then + commands=() + fi + + # keep flag value with flagname as flaghash + # flaghash variable is an associative array which is only supported in bash > 3. 
+ if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then + if [ -n "${flagvalue}" ] ; then + flaghash[${flagname}]=${flagvalue} + elif [ -n "${words[ $((c+1)) ]}" ] ; then + flaghash[${flagname}]=${words[ $((c+1)) ]} + else + flaghash[${flagname}]="true" # pad "true" for bool flag + fi + fi + + # skip the argument to a two word flag + if __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then + c=$((c+1)) + # if we are looking for a flags value, don't show commands + if [[ $c -eq $cword ]]; then + commands=() + fi + fi + + c=$((c+1)) + +} + +__%[1]s_handle_noun() +{ + __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + + if __%[1]s_contains_word "${words[c]}" "${must_have_one_noun[@]}"; then + must_have_one_noun=() + elif __%[1]s_contains_word "${words[c]}" "${noun_aliases[@]}"; then + must_have_one_noun=() + fi + + nouns+=("${words[c]}") + c=$((c+1)) +} + +__%[1]s_handle_command() +{ + __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + + local next_command + if [[ -n ${last_command} ]]; then + next_command="_${last_command}_${words[c]//:/__}" + else + if [[ $c -eq 0 ]]; then + next_command="_%[1]s_root_command" + else + next_command="_${words[c]//:/__}" + fi + fi + c=$((c+1)) + __%[1]s_debug "${FUNCNAME[0]}: looking for ${next_command}" + declare -F "$next_command" >/dev/null && $next_command +} + +__%[1]s_handle_word() +{ + if [[ $c -ge $cword ]]; then + __%[1]s_handle_reply + return + fi + __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + if [[ "${words[c]}" == -* ]]; then + __%[1]s_handle_flag + elif __%[1]s_contains_word "${words[c]}" "${commands[@]}"; then + __%[1]s_handle_command + elif [[ $c -eq 0 ]]; then + __%[1]s_handle_command + elif __%[1]s_contains_word "${words[c]}" "${command_aliases[@]}"; then + # aliashash variable is an associative array which is only supported in bash > 3. 
+ if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then + words[c]=${aliashash[${words[c]}]} + __%[1]s_handle_command + else + __%[1]s_handle_noun + fi + else + __%[1]s_handle_noun + fi + __%[1]s_handle_word +} + +`, name)) +} + +func writePostscript(buf *bytes.Buffer, name string) { + name = strings.Replace(name, ":", "__", -1) + buf.WriteString(fmt.Sprintf("__start_%s()\n", name)) + buf.WriteString(fmt.Sprintf(`{ + local cur prev words cword + declare -A flaghash 2>/dev/null || : + declare -A aliashash 2>/dev/null || : + if declare -F _init_completion >/dev/null 2>&1; then + _init_completion -s || return + else + __%[1]s_init_completion -n "=" || return + fi + + local c=0 + local flags=() + local two_word_flags=() + local local_nonpersistent_flags=() + local flags_with_completion=() + local flags_completion=() + local commands=("%[1]s") + local must_have_one_flag=() + local must_have_one_noun=() + local last_command + local nouns=() + + __%[1]s_handle_word +} + +`, name)) + buf.WriteString(fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then + complete -o default -F __start_%s %s +else + complete -o default -o nospace -F __start_%s %s +fi + +`, name, name, name, name)) + buf.WriteString("# ex: ts=4 sw=4 et filetype=sh\n") +} + +func writeCommands(buf *bytes.Buffer, cmd *Command) { + buf.WriteString(" commands=()\n") + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() || c == cmd.helpCommand { + continue + } + buf.WriteString(fmt.Sprintf(" commands+=(%q)\n", c.Name())) + writeCmdAliases(buf, c) + } + buf.WriteString("\n") +} + +func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]string, cmd *Command) { + for key, value := range annotations { + switch key { + case BashCompFilenameExt: + buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) + + var ext string + if len(value) > 0 { + ext = fmt.Sprintf("__%s_handle_filename_extension_flag ", cmd.Root().Name()) + strings.Join(value, "|") + } 
else { + ext = "_filedir" + } + buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext)) + case BashCompCustom: + buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) + if len(value) > 0 { + handlers := strings.Join(value, "; ") + buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", handlers)) + } else { + buf.WriteString(" flags_completion+=(:)\n") + } + case BashCompSubdirsInDir: + buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) + + var ext string + if len(value) == 1 { + ext = fmt.Sprintf("__%s_handle_subdirs_in_dir_flag ", cmd.Root().Name()) + value[0] + } else { + ext = "_filedir -d" + } + buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext)) + } + } +} + +func writeShortFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) { + name := flag.Shorthand + format := " " + if len(flag.NoOptDefVal) == 0 { + format += "two_word_" + } + format += "flags+=(\"-%s\")\n" + buf.WriteString(fmt.Sprintf(format, name)) + writeFlagHandler(buf, "-"+name, flag.Annotations, cmd) +} + +func writeFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) { + name := flag.Name + format := " flags+=(\"--%s" + if len(flag.NoOptDefVal) == 0 { + format += "=" + } + format += "\")\n" + buf.WriteString(fmt.Sprintf(format, name)) + writeFlagHandler(buf, "--"+name, flag.Annotations, cmd) +} + +func writeLocalNonPersistentFlag(buf *bytes.Buffer, flag *pflag.Flag) { + name := flag.Name + format := " local_nonpersistent_flags+=(\"--%s" + if len(flag.NoOptDefVal) == 0 { + format += "=" + } + format += "\")\n" + buf.WriteString(fmt.Sprintf(format, name)) +} + +func writeFlags(buf *bytes.Buffer, cmd *Command) { + buf.WriteString(` flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + +`) + localNonPersistentFlags := cmd.LocalNonPersistentFlags() + cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { + if nonCompletableFlag(flag) { + return + } + writeFlag(buf, flag, cmd) + if 
len(flag.Shorthand) > 0 { + writeShortFlag(buf, flag, cmd) + } + if localNonPersistentFlags.Lookup(flag.Name) != nil { + writeLocalNonPersistentFlag(buf, flag) + } + }) + cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { + if nonCompletableFlag(flag) { + return + } + writeFlag(buf, flag, cmd) + if len(flag.Shorthand) > 0 { + writeShortFlag(buf, flag, cmd) + } + }) + + buf.WriteString("\n") +} + +func writeRequiredFlag(buf *bytes.Buffer, cmd *Command) { + buf.WriteString(" must_have_one_flag=()\n") + flags := cmd.NonInheritedFlags() + flags.VisitAll(func(flag *pflag.Flag) { + if nonCompletableFlag(flag) { + return + } + for key := range flag.Annotations { + switch key { + case BashCompOneRequiredFlag: + format := " must_have_one_flag+=(\"--%s" + if flag.Value.Type() != "bool" { + format += "=" + } + format += "\")\n" + buf.WriteString(fmt.Sprintf(format, flag.Name)) + + if len(flag.Shorthand) > 0 { + buf.WriteString(fmt.Sprintf(" must_have_one_flag+=(\"-%s\")\n", flag.Shorthand)) + } + } + } + }) +} + +func writeRequiredNouns(buf *bytes.Buffer, cmd *Command) { + buf.WriteString(" must_have_one_noun=()\n") + sort.Sort(sort.StringSlice(cmd.ValidArgs)) + for _, value := range cmd.ValidArgs { + buf.WriteString(fmt.Sprintf(" must_have_one_noun+=(%q)\n", value)) + } +} + +func writeCmdAliases(buf *bytes.Buffer, cmd *Command) { + if len(cmd.Aliases) == 0 { + return + } + + sort.Sort(sort.StringSlice(cmd.Aliases)) + + buf.WriteString(fmt.Sprint(` if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then`, "\n")) + for _, value := range cmd.Aliases { + buf.WriteString(fmt.Sprintf(" command_aliases+=(%q)\n", value)) + buf.WriteString(fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name())) + } + buf.WriteString(` fi`) + buf.WriteString("\n") +} +func writeArgAliases(buf *bytes.Buffer, cmd *Command) { + buf.WriteString(" noun_aliases=()\n") + sort.Sort(sort.StringSlice(cmd.ArgAliases)) + for _, value := range cmd.ArgAliases { + buf.WriteString(fmt.Sprintf(" 
noun_aliases+=(%q)\n", value)) + } +} + +func gen(buf *bytes.Buffer, cmd *Command) { + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() || c == cmd.helpCommand { + continue + } + gen(buf, c) + } + commandName := cmd.CommandPath() + commandName = strings.Replace(commandName, " ", "_", -1) + commandName = strings.Replace(commandName, ":", "__", -1) + + if cmd.Root() == cmd { + buf.WriteString(fmt.Sprintf("_%s_root_command()\n{\n", commandName)) + } else { + buf.WriteString(fmt.Sprintf("_%s()\n{\n", commandName)) + } + + buf.WriteString(fmt.Sprintf(" last_command=%q\n", commandName)) + buf.WriteString("\n") + buf.WriteString(" command_aliases=()\n") + buf.WriteString("\n") + + writeCommands(buf, cmd) + writeFlags(buf, cmd) + writeRequiredFlag(buf, cmd) + writeRequiredNouns(buf, cmd) + writeArgAliases(buf, cmd) + buf.WriteString("}\n\n") +} + +// GenBashCompletion generates bash completion file and writes to the passed writer. +func (c *Command) GenBashCompletion(w io.Writer) error { + buf := new(bytes.Buffer) + writePreamble(buf, c.Name()) + if len(c.BashCompletionFunction) > 0 { + buf.WriteString(c.BashCompletionFunction + "\n") + } + gen(buf, c) + writePostscript(buf, c.Name()) + + _, err := buf.WriteTo(w) + return err +} + +func nonCompletableFlag(flag *pflag.Flag) bool { + return flag.Hidden || len(flag.Deprecated) > 0 +} + +// GenBashCompletionFile generates bash completion file. +func (c *Command) GenBashCompletionFile(filename string) error { + outFile, err := os.Create(filename) + if err != nil { + return err + } + defer outFile.Close() + + return c.GenBashCompletion(outFile) +} + +// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists, +// and causes your command to report an error if invoked without the flag. 
+func (c *Command) MarkFlagRequired(name string) error { + return MarkFlagRequired(c.Flags(), name) +} + +// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag if it exists, +// and causes your command to report an error if invoked without the flag. +func (c *Command) MarkPersistentFlagRequired(name string) error { + return MarkFlagRequired(c.PersistentFlags(), name) +} + +// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists, +// and causes your command to report an error if invoked without the flag. +func MarkFlagRequired(flags *pflag.FlagSet, name string) error { + return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"}) +} + +// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag, if it exists. +// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. +func (c *Command) MarkFlagFilename(name string, extensions ...string) error { + return MarkFlagFilename(c.Flags(), name, extensions...) +} + +// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists. +// Generated bash autocompletion will call the bash function f for the flag. +func (c *Command) MarkFlagCustom(name string, f string) error { + return MarkFlagCustom(c.Flags(), name, f) +} + +// MarkPersistentFlagFilename adds the BashCompFilenameExt annotation to the named persistent flag, if it exists. +// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. +func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error { + return MarkFlagFilename(c.PersistentFlags(), name, extensions...) +} + +// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag in the flag set, if it exists. +// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. 
+func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error { + return flags.SetAnnotation(name, BashCompFilenameExt, extensions) +} + +// MarkFlagCustom adds the BashCompCustom annotation to the named flag in the flag set, if it exists. +// Generated bash autocompletion will call the bash function f for the flag. +func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error { + return flags.SetAnnotation(name, BashCompCustom, []string{f}) +} diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md new file mode 100644 index 0000000000..e79d4769d1 --- /dev/null +++ b/vendor/github.com/spf13/cobra/bash_completions.md @@ -0,0 +1,221 @@ +# Generating Bash Completions For Your Own cobra.Command + +Generating bash completions from a cobra command is incredibly easy. An actual program which does so for the kubernetes kubectl binary is as follows: + +```go +package main + +import ( + "io/ioutil" + "os" + + "k8s.io/kubernetes/pkg/kubectl/cmd" + "k8s.io/kubernetes/pkg/kubectl/cmd/util" +) + +func main() { + kubectl := cmd.NewKubectlCommand(util.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard) + kubectl.GenBashCompletionFile("out.sh") +} +``` + +`out.sh` will get you completions of subcommands and flags. Copy it to `/etc/bash_completion.d/` as described [here](https://debian-administration.org/article/316/An_introduction_to_bash_completion_part_1) and reset your terminal to use autocompletion. If you make additional annotations to your code, you can get even more intelligent and flexible behavior. 
+ +## Creating your own custom functions + +Some more actual code that works in kubernetes: + +```bash +const ( + bash_completion_func = `__kubectl_parse_get() +{ + local kubectl_output out + if kubectl_output=$(kubectl get --no-headers "$1" 2>/dev/null); then + out=($(echo "${kubectl_output}" | awk '{print $1}')) + COMPREPLY=( $( compgen -W "${out[*]}" -- "$cur" ) ) + fi +} + +__kubectl_get_resource() +{ + if [[ ${#nouns[@]} -eq 0 ]]; then + return 1 + fi + __kubectl_parse_get ${nouns[${#nouns[@]} -1]} + if [[ $? -eq 0 ]]; then + return 0 + fi +} + +__custom_func() { + case ${last_command} in + kubectl_get | kubectl_describe | kubectl_delete | kubectl_stop) + __kubectl_get_resource + return + ;; + *) + ;; + esac +} +`) +``` + +And then I set that in my command definition: + +```go +cmds := &cobra.Command{ + Use: "kubectl", + Short: "kubectl controls the Kubernetes cluster manager", + Long: `kubectl controls the Kubernetes cluster manager. + +Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`, + Run: runHelp, + BashCompletionFunction: bash_completion_func, +} +``` + +The `BashCompletionFunction` option is really only valid/useful on the root command. Doing the above will cause `__custom_func()` to be called when the built in processor was unable to find a solution. In the case of kubernetes a valid command might look something like `kubectl get pod [mypod]`. If you type `kubectl get pod [tab][tab]` the `__custom_func()` will run because the cobra.Command only understood "kubectl" and "get." `__custom_func()` will see that the cobra.Command is "kubectl_get" and will thus call another helper `__kubectl_get_resource()`. `__kubectl_get_resource` will look at the 'nouns' collected. In our example the only noun will be `pod`. So it will call `__kubectl_parse_get pod`. `__kubectl_parse_get` will actually call out to kubernetes and get any pods. It will then set `COMPREPLY` to valid pods!
+ +## Have the completions code complete your 'nouns' + +In the above example "pod" was assumed to already be typed. But if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. Simplified code from `kubectl get` looks like: + +```go +validArgs []string = { "pod", "node", "service", "replicationcontroller" } + +cmd := &cobra.Command{ + Use: "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)", + Short: "Display one or many resources", + Long: get_long, + Example: get_example, + Run: func(cmd *cobra.Command, args []string) { + err := RunGet(f, out, cmd, args) + util.CheckErr(err) + }, + ValidArgs: validArgs, +} +``` + +Notice we put the "ValidArgs" on the "get" subcommand. Doing so will give results like + +```bash +# kubectl get [tab][tab] +node pod replicationcontroller service +``` + +## Plural form and shortcuts for nouns + +If your nouns have a number of aliases, you can define them alongside `ValidArgs` using `ArgAliases`: + +```go +argAliases []string = { "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" } + +cmd := &cobra.Command{ + ... + ValidArgs: validArgs, + ArgAliases: argAliases +} +``` + +The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by +the completion algorithm if entered manually, e.g. in: + +```bash +# kubectl get rc [tab][tab] +backend frontend database +``` + +Note that without declaring `rc` as an alias, the completion algorithm would show the list of nouns +in this example again instead of the replication controllers. + +## Mark flags as required + +Most of the time completions will only show subcommands. But if a flag is required to make a subcommand work, you probably want it to show up when the user types [tab][tab]. Marking a flag as 'Required' is incredibly easy. 
+ +```go +cmd.MarkFlagRequired("pod") +cmd.MarkFlagRequired("container") +``` + +and you'll get something like + +```bash +# kubectl exec [tab][tab][tab] +-c --container= -p --pod= +``` + +# Specify valid filename extensions for flags that take a filename + +In this example we use --filename= and expect to get a json or yaml file as the argument. To make this easier we annotate the --filename flag with valid filename extensions. + +```go + annotations := []string{"json", "yaml", "yml"} + annotation := make(map[string][]string) + annotation[cobra.BashCompFilenameExt] = annotations + + flag := &pflag.Flag{ + Name: "filename", + Shorthand: "f", + Usage: usage, + Value: value, + DefValue: value.String(), + Annotations: annotation, + } + cmd.Flags().AddFlag(flag) +``` + +Now when you run a command with this filename flag you'll get something like + +```bash +# kubectl create -f +test/ example/ rpmbuild/ +hello.yml test.json +``` + +So while there are many other files in the CWD it only shows me subdirs and those with valid extensions. 
+ +# Specify custom flag completion + +Similar to the filename completion and filtering using cobra.BashCompFilenameExt, you can specify +a custom flag completion function with cobra.BashCompCustom: + +```go + annotation := make(map[string][]string) + annotation[cobra.BashCompCustom] = []string{"__kubectl_get_namespaces"} + + flag := &pflag.Flag{ + Name: "namespace", + Usage: usage, + Annotations: annotation, + } + cmd.Flags().AddFlag(flag) +``` + +In addition add the `__handle_namespace_flag` implementation in the `BashCompletionFunction` +value, e.g.: + +```bash +__kubectl_get_namespaces() +{ + local template + template="{{ range .items }}{{ .metadata.name }} {{ end }}" + local kubectl_out + if kubectl_out=$(kubectl get -o template --template="${template}" namespace 2>/dev/null); then + COMPREPLY=( $( compgen -W "${kubectl_out}[*]" -- "$cur" ) ) + fi +} +``` +# Using bash aliases for commands + +You can also configure the `bash aliases` for the commands and they will also support completions. + +```bash +alias aliasname=origcommand +complete -o default -F __start_origcommand aliasname + +# and now when you run `aliasname` completion will make +# suggestions as it did for `origcommand`. + +$) aliasname +completion firstcommand secondcommand +``` diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go new file mode 100644 index 0000000000..7010fd15b7 --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra.go @@ -0,0 +1,200 @@ +// Copyright © 2013 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Commands similar to git, go tools and other modern CLI tools +// inspired by go, go-Commander, gh and subcommand + +package cobra + +import ( + "fmt" + "io" + "reflect" + "strconv" + "strings" + "text/template" + "unicode" +) + +var templateFuncs = template.FuncMap{ + "trim": strings.TrimSpace, + "trimRightSpace": trimRightSpace, + "trimTrailingWhitespaces": trimRightSpace, + "appendIfNotPresent": appendIfNotPresent, + "rpad": rpad, + "gt": Gt, + "eq": Eq, +} + +var initializers []func() + +// EnablePrefixMatching allows to set automatic prefix matching. Automatic prefix matching can be a dangerous thing +// to automatically enable in CLI tools. +// Set this to true to enable it. +var EnablePrefixMatching = false + +// EnableCommandSorting controls sorting of the slice of commands, which is turned on by default. +// To disable sorting, set it to false. +var EnableCommandSorting = true + +// MousetrapHelpText enables an information splash screen on Windows +// if the CLI is started from explorer.exe. +// To disable the mousetrap, just set this variable to blank string (""). +// Works only on Microsoft Windows. +var MousetrapHelpText string = `This is a command line tool. + +You need to open cmd.exe and run it from there. +` + +// AddTemplateFunc adds a template function that's available to Usage and Help +// template generation. +func AddTemplateFunc(name string, tmplFunc interface{}) { + templateFuncs[name] = tmplFunc +} + +// AddTemplateFuncs adds multiple template functions that are available to Usage and +// Help template generation. 
+func AddTemplateFuncs(tmplFuncs template.FuncMap) { + for k, v := range tmplFuncs { + templateFuncs[k] = v + } +} + +// OnInitialize sets the passed functions to be run when each command's +// Execute method is called. +func OnInitialize(y ...func()) { + initializers = append(initializers, y...) +} + +// FIXME Gt is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra. + +// Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans, +// Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as +// ints and then compared. +func Gt(a interface{}, b interface{}) bool { + var left, right int64 + av := reflect.ValueOf(a) + + switch av.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + left = int64(av.Len()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + left = av.Int() + case reflect.String: + left, _ = strconv.ParseInt(av.String(), 10, 64) + } + + bv := reflect.ValueOf(b) + + switch bv.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + right = int64(bv.Len()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + right = bv.Int() + case reflect.String: + right, _ = strconv.ParseInt(bv.String(), 10, 64) + } + + return left > right +} + +// FIXME Eq is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra. + +// Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic. 
+func Eq(a interface{}, b interface{}) bool { + av := reflect.ValueOf(a) + bv := reflect.ValueOf(b) + + switch av.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + panic("Eq called on unsupported type") + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Int() == bv.Int() + case reflect.String: + return av.String() == bv.String() + } + return false +} + +func trimRightSpace(s string) string { + return strings.TrimRightFunc(s, unicode.IsSpace) +} + +// FIXME appendIfNotPresent is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra. + +// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s. +func appendIfNotPresent(s, stringToAppend string) string { + if strings.Contains(s, stringToAppend) { + return s + } + return s + " " + stringToAppend +} + +// rpad adds padding to the right of a string. +func rpad(s string, padding int) string { + template := fmt.Sprintf("%%-%ds", padding) + return fmt.Sprintf(template, s) +} + +// tmpl executes the given template text on data, writing the result to w. +func tmpl(w io.Writer, text string, data interface{}) error { + t := template.New("top") + t.Funcs(templateFuncs) + template.Must(t.Parse(text)) + return t.Execute(w, data) +} + +// ld compares two strings and returns the levenshtein distance between them. 
+func ld(s, t string, ignoreCase bool) int { + if ignoreCase { + s = strings.ToLower(s) + t = strings.ToLower(t) + } + d := make([][]int, len(s)+1) + for i := range d { + d[i] = make([]int, len(t)+1) + } + for i := range d { + d[i][0] = i + } + for j := range d[0] { + d[0][j] = j + } + for j := 1; j <= len(t); j++ { + for i := 1; i <= len(s); i++ { + if s[i-1] == t[j-1] { + d[i][j] = d[i-1][j-1] + } else { + min := d[i-1][j] + if d[i][j-1] < min { + min = d[i][j-1] + } + if d[i-1][j-1] < min { + min = d[i-1][j-1] + } + d[i][j] = min + 1 + } + } + + } + return d[len(s)][len(t)] +} + +func stringInSlice(a string, list []string) bool { + for _, b := range list { + if b == a { + return true + } + } + return false +} diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go new file mode 100644 index 0000000000..34d1bf3671 --- /dev/null +++ b/vendor/github.com/spf13/cobra/command.go @@ -0,0 +1,1517 @@ +// Copyright © 2013 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces. +// In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code. 
+package cobra + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" + + flag "github.com/spf13/pflag" +) + +// FParseErrWhitelist configures Flag parse errors to be ignored +type FParseErrWhitelist flag.ParseErrorsWhitelist + +// Command is just that, a command for your application. +// E.g. 'go run ...' - 'run' is the command. Cobra requires +// you to define the usage and description as part of your command +// definition to ensure usability. +type Command struct { + // Use is the one-line usage message. + Use string + + // Aliases is an array of aliases that can be used instead of the first word in Use. + Aliases []string + + // SuggestFor is an array of command names for which this command will be suggested - + // similar to aliases but only suggests. + SuggestFor []string + + // Short is the short description shown in the 'help' output. + Short string + + // Long is the long message shown in the 'help ' output. + Long string + + // Example is examples of how to use the command. + Example string + + // ValidArgs is list of all valid non-flag arguments that are accepted in bash completions + ValidArgs []string + + // Expected arguments + Args PositionalArgs + + // ArgAliases is List of aliases for ValidArgs. + // These are not suggested to the user in the bash completion, + // but accepted if entered manually. + ArgAliases []string + + // BashCompletionFunction is custom functions used by the bash autocompletion generator. + BashCompletionFunction string + + // Deprecated defines, if this command is deprecated and should print this string when used. + Deprecated string + + // Hidden defines, if this command is hidden and should NOT show up in the list of available commands. + Hidden bool + + // Annotations are key/value pairs that can be used by applications to identify or + // group commands. + Annotations map[string]string + + // Version defines the version for this command. 
If this value is non-empty and the command does not + // define a "version" flag, a "version" boolean flag will be added to the command and, if specified, + // will print content of the "Version" variable. + Version string + + // The *Run functions are executed in the following order: + // * PersistentPreRun() + // * PreRun() + // * Run() + // * PostRun() + // * PersistentPostRun() + // All functions get the same args, the arguments after the command name. + // + // PersistentPreRun: children of this command will inherit and execute. + PersistentPreRun func(cmd *Command, args []string) + // PersistentPreRunE: PersistentPreRun but returns an error. + PersistentPreRunE func(cmd *Command, args []string) error + // PreRun: children of this command will not inherit. + PreRun func(cmd *Command, args []string) + // PreRunE: PreRun but returns an error. + PreRunE func(cmd *Command, args []string) error + // Run: Typically the actual work function. Most commands will only implement this. + Run func(cmd *Command, args []string) + // RunE: Run but returns an error. + RunE func(cmd *Command, args []string) error + // PostRun: run after the Run command. + PostRun func(cmd *Command, args []string) + // PostRunE: PostRun but returns an error. + PostRunE func(cmd *Command, args []string) error + // PersistentPostRun: children of this command will inherit and execute after PostRun. + PersistentPostRun func(cmd *Command, args []string) + // PersistentPostRunE: PersistentPostRun but returns an error. + PersistentPostRunE func(cmd *Command, args []string) error + + // SilenceErrors is an option to quiet errors down stream. + SilenceErrors bool + + // SilenceUsage is an option to silence usage when an error occurs. + SilenceUsage bool + + // DisableFlagParsing disables the flag parsing. + // If this is true all flags will be passed to the command as arguments. 
+ DisableFlagParsing bool + + // DisableAutoGenTag defines, if gen tag ("Auto generated by spf13/cobra...") + // will be printed by generating docs for this command. + DisableAutoGenTag bool + + // DisableFlagsInUseLine will disable the addition of [flags] to the usage + // line of a command when printing help or generating docs + DisableFlagsInUseLine bool + + // DisableSuggestions disables the suggestions based on Levenshtein distance + // that go along with 'unknown command' messages. + DisableSuggestions bool + // SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions. + // Must be > 0. + SuggestionsMinimumDistance int + + // TraverseChildren parses flags on all parents before executing child command. + TraverseChildren bool + + //FParseErrWhitelist flag parse errors to be ignored + FParseErrWhitelist FParseErrWhitelist + + // commands is the list of commands supported by this program. + commands []*Command + // parent is a parent command for this command. + parent *Command + // Max lengths of commands' string lengths for use in padding. + commandsMaxUseLen int + commandsMaxCommandPathLen int + commandsMaxNameLen int + // commandsAreSorted defines, if command slice are sorted or not. + commandsAreSorted bool + // commandCalledAs is the name or alias value used to call this command. + commandCalledAs struct { + name string + called bool + } + + // args is actual args parsed from flags. + args []string + // flagErrorBuf contains all error messages from pflag. + flagErrorBuf *bytes.Buffer + // flags is full set of flags. + flags *flag.FlagSet + // pflags contains persistent flags. + pflags *flag.FlagSet + // lflags contains local flags. + lflags *flag.FlagSet + // iflags contains inherited flags. + iflags *flag.FlagSet + // parentsPflags is all persistent flags of cmd's parents. 
+ parentsPflags *flag.FlagSet + // globNormFunc is the global normalization function + // that we can use on every pflag set and children commands + globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName + + // output is an output writer defined by user. + output io.Writer + // usageFunc is usage func defined by user. + usageFunc func(*Command) error + // usageTemplate is usage template defined by user. + usageTemplate string + // flagErrorFunc is func defined by user and it's called when the parsing of + // flags returns an error. + flagErrorFunc func(*Command, error) error + // helpTemplate is help template defined by user. + helpTemplate string + // helpFunc is help func defined by user. + helpFunc func(*Command, []string) + // helpCommand is command with usage 'help'. If it's not defined by user, + // cobra uses default help command. + helpCommand *Command + // versionTemplate is the version template defined by user. + versionTemplate string +} + +// SetArgs sets arguments for the command. It is set to os.Args[1:] by default, if desired, can be overridden +// particularly useful when testing. +func (c *Command) SetArgs(a []string) { + c.args = a +} + +// SetOutput sets the destination for usage and error messages. +// If output is nil, os.Stderr is used. +func (c *Command) SetOutput(output io.Writer) { + c.output = output +} + +// SetUsageFunc sets usage function. Usage can be defined by application. +func (c *Command) SetUsageFunc(f func(*Command) error) { + c.usageFunc = f +} + +// SetUsageTemplate sets usage template. Can be defined by Application. +func (c *Command) SetUsageTemplate(s string) { + c.usageTemplate = s +} + +// SetFlagErrorFunc sets a function to generate an error when flag parsing +// fails. +func (c *Command) SetFlagErrorFunc(f func(*Command, error) error) { + c.flagErrorFunc = f +} + +// SetHelpFunc sets help function. Can be defined by Application. 
+func (c *Command) SetHelpFunc(f func(*Command, []string)) { + c.helpFunc = f +} + +// SetHelpCommand sets help command. +func (c *Command) SetHelpCommand(cmd *Command) { + c.helpCommand = cmd +} + +// SetHelpTemplate sets help template to be used. Application can use it to set custom template. +func (c *Command) SetHelpTemplate(s string) { + c.helpTemplate = s +} + +// SetVersionTemplate sets version template to be used. Application can use it to set custom template. +func (c *Command) SetVersionTemplate(s string) { + c.versionTemplate = s +} + +// SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands. +// The user should not have a cyclic dependency on commands. +func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) { + c.Flags().SetNormalizeFunc(n) + c.PersistentFlags().SetNormalizeFunc(n) + c.globNormFunc = n + + for _, command := range c.commands { + command.SetGlobalNormalizationFunc(n) + } +} + +// OutOrStdout returns output to stdout. +func (c *Command) OutOrStdout() io.Writer { + return c.getOut(os.Stdout) +} + +// OutOrStderr returns output to stderr +func (c *Command) OutOrStderr() io.Writer { + return c.getOut(os.Stderr) +} + +func (c *Command) getOut(def io.Writer) io.Writer { + if c.output != nil { + return c.output + } + if c.HasParent() { + return c.parent.getOut(def) + } + return def +} + +// UsageFunc returns either the function set by SetUsageFunc for this command +// or a parent, or it returns a default usage function. +func (c *Command) UsageFunc() (f func(*Command) error) { + if c.usageFunc != nil { + return c.usageFunc + } + if c.HasParent() { + return c.Parent().UsageFunc() + } + return func(c *Command) error { + c.mergePersistentFlags() + err := tmpl(c.OutOrStderr(), c.UsageTemplate(), c) + if err != nil { + c.Println(err) + } + return err + } +} + +// Usage puts out the usage for the command. +// Used when a user provides invalid input. 
+// Can be defined by user by overriding UsageFunc. +func (c *Command) Usage() error { + return c.UsageFunc()(c) +} + +// HelpFunc returns either the function set by SetHelpFunc for this command +// or a parent, or it returns a function with default help behavior. +func (c *Command) HelpFunc() func(*Command, []string) { + if c.helpFunc != nil { + return c.helpFunc + } + if c.HasParent() { + return c.Parent().HelpFunc() + } + return func(c *Command, a []string) { + c.mergePersistentFlags() + err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c) + if err != nil { + c.Println(err) + } + } +} + +// Help puts out the help for the command. +// Used when a user calls help [command]. +// Can be defined by user by overriding HelpFunc. +func (c *Command) Help() error { + c.HelpFunc()(c, []string{}) + return nil +} + +// UsageString return usage string. +func (c *Command) UsageString() string { + tmpOutput := c.output + bb := new(bytes.Buffer) + c.SetOutput(bb) + c.Usage() + c.output = tmpOutput + return bb.String() +} + +// FlagErrorFunc returns either the function set by SetFlagErrorFunc for this +// command or a parent, or it returns a function which returns the original +// error. +func (c *Command) FlagErrorFunc() (f func(*Command, error) error) { + if c.flagErrorFunc != nil { + return c.flagErrorFunc + } + + if c.HasParent() { + return c.parent.FlagErrorFunc() + } + return func(c *Command, err error) error { + return err + } +} + +var minUsagePadding = 25 + +// UsagePadding return padding for the usage. +func (c *Command) UsagePadding() int { + if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen { + return minUsagePadding + } + return c.parent.commandsMaxUseLen +} + +var minCommandPathPadding = 11 + +// CommandPathPadding return padding for the command path. 
+func (c *Command) CommandPathPadding() int { + if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen { + return minCommandPathPadding + } + return c.parent.commandsMaxCommandPathLen +} + +var minNamePadding = 11 + +// NamePadding returns padding for the name. +func (c *Command) NamePadding() int { + if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen { + return minNamePadding + } + return c.parent.commandsMaxNameLen +} + +// UsageTemplate returns usage template for the command. +func (c *Command) UsageTemplate() string { + if c.usageTemplate != "" { + return c.usageTemplate + } + + if c.HasParent() { + return c.parent.UsageTemplate() + } + return `Usage:{{if .Runnable}} + {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}} + {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}} + +Aliases: + {{.NameAndAliases}}{{end}}{{if .HasExample}} + +Examples: +{{.Example}}{{end}}{{if .HasAvailableSubCommands}} + +Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}} + +Flags: +{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}} + +Global Flags: +{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}} + +Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}} + {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}} + +Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}} +` +} + +// HelpTemplate return help template for the command. +func (c *Command) HelpTemplate() string { + if c.helpTemplate != "" { + return c.helpTemplate + } + + if c.HasParent() { + return c.parent.HelpTemplate() + } + return `{{with (or .Long .Short)}}{{. 
| trimTrailingWhitespaces}} + +{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` +} + +// VersionTemplate return version template for the command. +func (c *Command) VersionTemplate() string { + if c.versionTemplate != "" { + return c.versionTemplate + } + + if c.HasParent() { + return c.parent.VersionTemplate() + } + return `{{with .Name}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}} +` +} + +func hasNoOptDefVal(name string, fs *flag.FlagSet) bool { + flag := fs.Lookup(name) + if flag == nil { + return false + } + return flag.NoOptDefVal != "" +} + +func shortHasNoOptDefVal(name string, fs *flag.FlagSet) bool { + if len(name) == 0 { + return false + } + + flag := fs.ShorthandLookup(name[:1]) + if flag == nil { + return false + } + return flag.NoOptDefVal != "" +} + +func stripFlags(args []string, c *Command) []string { + if len(args) == 0 { + return args + } + c.mergePersistentFlags() + + commands := []string{} + flags := c.Flags() + +Loop: + for len(args) > 0 { + s := args[0] + args = args[1:] + switch { + case s == "--": + // "--" terminates the flags + break Loop + case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags): + // If '--flag arg' then + // delete arg from args. + fallthrough // (do the same as below) + case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags): + // If '-f arg' then + // delete 'arg' from args or break the loop if len(args) <= 1. + if len(args) <= 1 { + break Loop + } else { + args = args[1:] + continue + } + case s != "" && !strings.HasPrefix(s, "-"): + commands = append(commands, s) + } + } + + return commands +} + +// argsMinusFirstX removes only the first x from args. Otherwise, commands that look like +// openshift admin policy add-role-to-user admin my-user, lose the admin argument (arg[4]). 
+func argsMinusFirstX(args []string, x string) []string { + for i, y := range args { + if x == y { + ret := []string{} + ret = append(ret, args[:i]...) + ret = append(ret, args[i+1:]...) + return ret + } + } + return args +} + +func isFlagArg(arg string) bool { + return ((len(arg) >= 3 && arg[1] == '-') || + (len(arg) >= 2 && arg[0] == '-' && arg[1] != '-')) +} + +// Find the target command given the args and command tree +// Meant to be run on the highest node. Only searches down. +func (c *Command) Find(args []string) (*Command, []string, error) { + var innerfind func(*Command, []string) (*Command, []string) + + innerfind = func(c *Command, innerArgs []string) (*Command, []string) { + argsWOflags := stripFlags(innerArgs, c) + if len(argsWOflags) == 0 { + return c, innerArgs + } + nextSubCmd := argsWOflags[0] + + cmd := c.findNext(nextSubCmd) + if cmd != nil { + return innerfind(cmd, argsMinusFirstX(innerArgs, nextSubCmd)) + } + return c, innerArgs + } + + commandFound, a := innerfind(c, args) + if commandFound.Args == nil { + return commandFound, a, legacyArgs(commandFound, stripFlags(a, commandFound)) + } + return commandFound, a, nil +} + +func (c *Command) findSuggestions(arg string) string { + if c.DisableSuggestions { + return "" + } + if c.SuggestionsMinimumDistance <= 0 { + c.SuggestionsMinimumDistance = 2 + } + suggestionsString := "" + if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 { + suggestionsString += "\n\nDid you mean this?\n" + for _, s := range suggestions { + suggestionsString += fmt.Sprintf("\t%v\n", s) + } + } + return suggestionsString +} + +func (c *Command) findNext(next string) *Command { + matches := make([]*Command, 0) + for _, cmd := range c.commands { + if cmd.Name() == next || cmd.HasAlias(next) { + cmd.commandCalledAs.name = next + return cmd + } + if EnablePrefixMatching && cmd.hasNameOrAliasPrefix(next) { + matches = append(matches, cmd) + } + } + + if len(matches) == 1 { + return matches[0] + } + + return nil +} + 
+// Traverse the command tree to find the command, and parse args for +// each parent. +func (c *Command) Traverse(args []string) (*Command, []string, error) { + flags := []string{} + inFlag := false + + for i, arg := range args { + switch { + // A long flag with a space separated value + case strings.HasPrefix(arg, "--") && !strings.Contains(arg, "="): + // TODO: this isn't quite right, we should really check ahead for 'true' or 'false' + inFlag = !hasNoOptDefVal(arg[2:], c.Flags()) + flags = append(flags, arg) + continue + // A short flag with a space separated value + case strings.HasPrefix(arg, "-") && !strings.Contains(arg, "=") && len(arg) == 2 && !shortHasNoOptDefVal(arg[1:], c.Flags()): + inFlag = true + flags = append(flags, arg) + continue + // The value for a flag + case inFlag: + inFlag = false + flags = append(flags, arg) + continue + // A flag without a value, or with an `=` separated value + case isFlagArg(arg): + flags = append(flags, arg) + continue + } + + cmd := c.findNext(arg) + if cmd == nil { + return c, args, nil + } + + if err := c.ParseFlags(flags); err != nil { + return nil, args, err + } + return cmd.Traverse(args[i+1:]) + } + return c, args, nil +} + +// SuggestionsFor provides suggestions for the typedName. 
+func (c *Command) SuggestionsFor(typedName string) []string { + suggestions := []string{} + for _, cmd := range c.commands { + if cmd.IsAvailableCommand() { + levenshteinDistance := ld(typedName, cmd.Name(), true) + suggestByLevenshtein := levenshteinDistance <= c.SuggestionsMinimumDistance + suggestByPrefix := strings.HasPrefix(strings.ToLower(cmd.Name()), strings.ToLower(typedName)) + if suggestByLevenshtein || suggestByPrefix { + suggestions = append(suggestions, cmd.Name()) + } + for _, explicitSuggestion := range cmd.SuggestFor { + if strings.EqualFold(typedName, explicitSuggestion) { + suggestions = append(suggestions, cmd.Name()) + } + } + } + } + return suggestions +} + +// VisitParents visits all parents of the command and invokes fn on each parent. +func (c *Command) VisitParents(fn func(*Command)) { + if c.HasParent() { + fn(c.Parent()) + c.Parent().VisitParents(fn) + } +} + +// Root finds root command. +func (c *Command) Root() *Command { + if c.HasParent() { + return c.Parent().Root() + } + return c +} + +// ArgsLenAtDash will return the length of c.Flags().Args at the moment +// when a -- was found during args parsing. +func (c *Command) ArgsLenAtDash() int { + return c.Flags().ArgsLenAtDash() +} + +func (c *Command) execute(a []string) (err error) { + if c == nil { + return fmt.Errorf("Called Execute() on a nil Command") + } + + if len(c.Deprecated) > 0 { + c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated) + } + + // initialize help and version flag at the last point possible to allow for user + // overriding + c.InitDefaultHelpFlag() + c.InitDefaultVersionFlag() + + err = c.ParseFlags(a) + if err != nil { + return c.FlagErrorFunc()(c, err) + } + + // If help is called, regardless of other flags, return we want help. + // Also say we need help if the command isn't runnable. 
+ helpVal, err := c.Flags().GetBool("help") + if err != nil { + // should be impossible to get here as we always declare a help + // flag in InitDefaultHelpFlag() + c.Println("\"help\" flag declared as non-bool. Please correct your code") + return err + } + + if helpVal { + return flag.ErrHelp + } + + // for back-compat, only add version flag behavior if version is defined + if c.Version != "" { + versionVal, err := c.Flags().GetBool("version") + if err != nil { + c.Println("\"version\" flag declared as non-bool. Please correct your code") + return err + } + if versionVal { + err := tmpl(c.OutOrStdout(), c.VersionTemplate(), c) + if err != nil { + c.Println(err) + } + return err + } + } + + if !c.Runnable() { + return flag.ErrHelp + } + + c.preRun() + + argWoFlags := c.Flags().Args() + if c.DisableFlagParsing { + argWoFlags = a + } + + if err := c.ValidateArgs(argWoFlags); err != nil { + return err + } + + for p := c; p != nil; p = p.Parent() { + if p.PersistentPreRunE != nil { + if err := p.PersistentPreRunE(c, argWoFlags); err != nil { + return err + } + break + } else if p.PersistentPreRun != nil { + p.PersistentPreRun(c, argWoFlags) + break + } + } + if c.PreRunE != nil { + if err := c.PreRunE(c, argWoFlags); err != nil { + return err + } + } else if c.PreRun != nil { + c.PreRun(c, argWoFlags) + } + + if err := c.validateRequiredFlags(); err != nil { + return err + } + if c.RunE != nil { + if err := c.RunE(c, argWoFlags); err != nil { + return err + } + } else { + c.Run(c, argWoFlags) + } + if c.PostRunE != nil { + if err := c.PostRunE(c, argWoFlags); err != nil { + return err + } + } else if c.PostRun != nil { + c.PostRun(c, argWoFlags) + } + for p := c; p != nil; p = p.Parent() { + if p.PersistentPostRunE != nil { + if err := p.PersistentPostRunE(c, argWoFlags); err != nil { + return err + } + break + } else if p.PersistentPostRun != nil { + p.PersistentPostRun(c, argWoFlags) + break + } + } + + return nil +} + +func (c *Command) preRun() { + for _, x := 
range initializers { + x() + } +} + +// Execute uses the args (os.Args[1:] by default) +// and run through the command tree finding appropriate matches +// for commands and then corresponding flags. +func (c *Command) Execute() error { + _, err := c.ExecuteC() + return err +} + +// ExecuteC executes the command. +func (c *Command) ExecuteC() (cmd *Command, err error) { + // Regardless of what command execute is called on, run on Root only + if c.HasParent() { + return c.Root().ExecuteC() + } + + // windows hook + if preExecHookFn != nil { + preExecHookFn(c) + } + + // initialize help as the last point possible to allow for user + // overriding + c.InitDefaultHelpCmd() + + var args []string + + // Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155 + if c.args == nil && filepath.Base(os.Args[0]) != "cobra.test" { + args = os.Args[1:] + } else { + args = c.args + } + + var flags []string + if c.TraverseChildren { + cmd, flags, err = c.Traverse(args) + } else { + cmd, flags, err = c.Find(args) + } + if err != nil { + // If found parse to a subcommand and then failed, talk about the subcommand + if cmd != nil { + c = cmd + } + if !c.SilenceErrors { + c.Println("Error:", err.Error()) + c.Printf("Run '%v --help' for usage.\n", c.CommandPath()) + } + return c, err + } + + cmd.commandCalledAs.called = true + if cmd.commandCalledAs.name == "" { + cmd.commandCalledAs.name = cmd.Name() + } + + err = cmd.execute(flags) + if err != nil { + // Always show help if requested, even if SilenceErrors is in + // effect + if err == flag.ErrHelp { + cmd.HelpFunc()(cmd, args) + return cmd, nil + } + + // If root command has SilentErrors flagged, + // all subcommands should respect it + if !cmd.SilenceErrors && !c.SilenceErrors { + c.Println("Error:", err.Error()) + } + + // If root command has SilentUsage flagged, + // all subcommands should respect it + if !cmd.SilenceUsage && !c.SilenceUsage { + c.Println(cmd.UsageString()) + } + } + return cmd, err +} + +func (c 
*Command) ValidateArgs(args []string) error { + if c.Args == nil { + return nil + } + return c.Args(c, args) +} + +func (c *Command) validateRequiredFlags() error { + flags := c.Flags() + missingFlagNames := []string{} + flags.VisitAll(func(pflag *flag.Flag) { + requiredAnnotation, found := pflag.Annotations[BashCompOneRequiredFlag] + if !found { + return + } + if (requiredAnnotation[0] == "true") && !pflag.Changed { + missingFlagNames = append(missingFlagNames, pflag.Name) + } + }) + + if len(missingFlagNames) > 0 { + return fmt.Errorf(`required flag(s) "%s" not set`, strings.Join(missingFlagNames, `", "`)) + } + return nil +} + +// InitDefaultHelpFlag adds default help flag to c. +// It is called automatically by executing the c or by calling help and usage. +// If c already has help flag, it will do nothing. +func (c *Command) InitDefaultHelpFlag() { + c.mergePersistentFlags() + if c.Flags().Lookup("help") == nil { + usage := "help for " + if c.Name() == "" { + usage += "this command" + } else { + usage += c.Name() + } + c.Flags().BoolP("help", "h", false, usage) + } +} + +// InitDefaultVersionFlag adds default version flag to c. +// It is called automatically by executing the c. +// If c already has a version flag, it will do nothing. +// If c.Version is empty, it will do nothing. +func (c *Command) InitDefaultVersionFlag() { + if c.Version == "" { + return + } + + c.mergePersistentFlags() + if c.Flags().Lookup("version") == nil { + usage := "version for " + if c.Name() == "" { + usage += "this command" + } else { + usage += c.Name() + } + c.Flags().Bool("version", false, usage) + } +} + +// InitDefaultHelpCmd adds default help command to c. +// It is called automatically by executing the c or by calling help and usage. +// If c already has help command or c has no subcommands, it will do nothing. 
+func (c *Command) InitDefaultHelpCmd() { + if !c.HasSubCommands() { + return + } + + if c.helpCommand == nil { + c.helpCommand = &Command{ + Use: "help [command]", + Short: "Help about any command", + Long: `Help provides help for any command in the application. +Simply type ` + c.Name() + ` help [path to command] for full details.`, + + Run: func(c *Command, args []string) { + cmd, _, e := c.Root().Find(args) + if cmd == nil || e != nil { + c.Printf("Unknown help topic %#q\n", args) + c.Root().Usage() + } else { + cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown + cmd.Help() + } + }, + } + } + c.RemoveCommand(c.helpCommand) + c.AddCommand(c.helpCommand) +} + +// ResetCommands delete parent, subcommand and help command from c. +func (c *Command) ResetCommands() { + c.parent = nil + c.commands = nil + c.helpCommand = nil + c.parentsPflags = nil +} + +// Sorts commands by their names. +type commandSorterByName []*Command + +func (c commandSorterByName) Len() int { return len(c) } +func (c commandSorterByName) Swap(i, j int) { c[i], c[j] = c[j], c[i] } +func (c commandSorterByName) Less(i, j int) bool { return c[i].Name() < c[j].Name() } + +// Commands returns a sorted slice of child commands. +func (c *Command) Commands() []*Command { + // do not sort commands if it already sorted or sorting was disabled + if EnableCommandSorting && !c.commandsAreSorted { + sort.Sort(commandSorterByName(c.commands)) + c.commandsAreSorted = true + } + return c.commands +} + +// AddCommand adds one or more commands to this parent command. 
+func (c *Command) AddCommand(cmds ...*Command) { + for i, x := range cmds { + if cmds[i] == c { + panic("Command can't be a child of itself") + } + cmds[i].parent = c + // update max lengths + usageLen := len(x.Use) + if usageLen > c.commandsMaxUseLen { + c.commandsMaxUseLen = usageLen + } + commandPathLen := len(x.CommandPath()) + if commandPathLen > c.commandsMaxCommandPathLen { + c.commandsMaxCommandPathLen = commandPathLen + } + nameLen := len(x.Name()) + if nameLen > c.commandsMaxNameLen { + c.commandsMaxNameLen = nameLen + } + // If global normalization function exists, update all children + if c.globNormFunc != nil { + x.SetGlobalNormalizationFunc(c.globNormFunc) + } + c.commands = append(c.commands, x) + c.commandsAreSorted = false + } +} + +// RemoveCommand removes one or more commands from a parent command. +func (c *Command) RemoveCommand(cmds ...*Command) { + commands := []*Command{} +main: + for _, command := range c.commands { + for _, cmd := range cmds { + if command == cmd { + command.parent = nil + continue main + } + } + commands = append(commands, command) + } + c.commands = commands + // recompute all lengths + c.commandsMaxUseLen = 0 + c.commandsMaxCommandPathLen = 0 + c.commandsMaxNameLen = 0 + for _, command := range c.commands { + usageLen := len(command.Use) + if usageLen > c.commandsMaxUseLen { + c.commandsMaxUseLen = usageLen + } + commandPathLen := len(command.CommandPath()) + if commandPathLen > c.commandsMaxCommandPathLen { + c.commandsMaxCommandPathLen = commandPathLen + } + nameLen := len(command.Name()) + if nameLen > c.commandsMaxNameLen { + c.commandsMaxNameLen = nameLen + } + } +} + +// Print is a convenience method to Print to the defined output, fallback to Stderr if not set. +func (c *Command) Print(i ...interface{}) { + fmt.Fprint(c.OutOrStderr(), i...) +} + +// Println is a convenience method to Println to the defined output, fallback to Stderr if not set. 
+func (c *Command) Println(i ...interface{}) { + c.Print(fmt.Sprintln(i...)) +} + +// Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set. +func (c *Command) Printf(format string, i ...interface{}) { + c.Print(fmt.Sprintf(format, i...)) +} + +// CommandPath returns the full path to this command. +func (c *Command) CommandPath() string { + if c.HasParent() { + return c.Parent().CommandPath() + " " + c.Name() + } + return c.Name() +} + +// UseLine puts out the full usage for a given command (including parents). +func (c *Command) UseLine() string { + var useline string + if c.HasParent() { + useline = c.parent.CommandPath() + " " + c.Use + } else { + useline = c.Use + } + if c.DisableFlagsInUseLine { + return useline + } + if c.HasAvailableFlags() && !strings.Contains(useline, "[flags]") { + useline += " [flags]" + } + return useline +} + +// DebugFlags used to determine which flags have been assigned to which commands +// and which persist. +func (c *Command) DebugFlags() { + c.Println("DebugFlags called on", c.Name()) + var debugflags func(*Command) + + debugflags = func(x *Command) { + if x.HasFlags() || x.HasPersistentFlags() { + c.Println(x.Name()) + } + if x.HasFlags() { + x.flags.VisitAll(func(f *flag.Flag) { + if x.HasPersistentFlags() && x.persistentFlag(f.Name) != nil { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [LP]") + } else { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]") + } + }) + } + if x.HasPersistentFlags() { + x.pflags.VisitAll(func(f *flag.Flag) { + if x.HasFlags() { + if x.flags.Lookup(f.Name) == nil { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") + } + } else { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") + } + }) + } + c.Println(x.flagErrorBuf) + if x.HasSubCommands() { + for _, y := range x.commands { + debugflags(y) + } + } + } + + 
debugflags(c) +} + +// Name returns the command's name: the first word in the use line. +func (c *Command) Name() string { + name := c.Use + i := strings.Index(name, " ") + if i >= 0 { + name = name[:i] + } + return name +} + +// HasAlias determines if a given string is an alias of the command. +func (c *Command) HasAlias(s string) bool { + for _, a := range c.Aliases { + if a == s { + return true + } + } + return false +} + +// CalledAs returns the command name or alias that was used to invoke +// this command or an empty string if the command has not been called. +func (c *Command) CalledAs() string { + if c.commandCalledAs.called { + return c.commandCalledAs.name + } + return "" +} + +// hasNameOrAliasPrefix returns true if the Name or any of aliases start +// with prefix +func (c *Command) hasNameOrAliasPrefix(prefix string) bool { + if strings.HasPrefix(c.Name(), prefix) { + c.commandCalledAs.name = c.Name() + return true + } + for _, alias := range c.Aliases { + if strings.HasPrefix(alias, prefix) { + c.commandCalledAs.name = alias + return true + } + } + return false +} + +// NameAndAliases returns a list of the command name and all aliases +func (c *Command) NameAndAliases() string { + return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ") +} + +// HasExample determines if the command has example. +func (c *Command) HasExample() bool { + return len(c.Example) > 0 +} + +// Runnable determines if the command is itself runnable. +func (c *Command) Runnable() bool { + return c.Run != nil || c.RunE != nil +} + +// HasSubCommands determines if the command has children commands. +func (c *Command) HasSubCommands() bool { + return len(c.commands) > 0 +} + +// IsAvailableCommand determines if a command is available as a non-help command +// (this includes all non deprecated/hidden commands). 
+func (c *Command) IsAvailableCommand() bool { + if len(c.Deprecated) != 0 || c.Hidden { + return false + } + + if c.HasParent() && c.Parent().helpCommand == c { + return false + } + + if c.Runnable() || c.HasAvailableSubCommands() { + return true + } + + return false +} + +// IsAdditionalHelpTopicCommand determines if a command is an additional +// help topic command; additional help topic command is determined by the +// fact that it is NOT runnable/hidden/deprecated, and has no sub commands that +// are runnable/hidden/deprecated. +// Concrete example: https://github.com/spf13/cobra/issues/393#issuecomment-282741924. +func (c *Command) IsAdditionalHelpTopicCommand() bool { + // if a command is runnable, deprecated, or hidden it is not a 'help' command + if c.Runnable() || len(c.Deprecated) != 0 || c.Hidden { + return false + } + + // if any non-help sub commands are found, the command is not a 'help' command + for _, sub := range c.commands { + if !sub.IsAdditionalHelpTopicCommand() { + return false + } + } + + // the command either has no sub commands, or no non-help sub commands + return true +} + +// HasHelpSubCommands determines if a command has any available 'help' sub commands +// that need to be shown in the usage/help default template under 'additional help +// topics'. +func (c *Command) HasHelpSubCommands() bool { + // return true on the first found available 'help' sub command + for _, sub := range c.commands { + if sub.IsAdditionalHelpTopicCommand() { + return true + } + } + + // the command either has no sub commands, or no available 'help' sub commands + return false +} + +// HasAvailableSubCommands determines if a command has available sub commands that +// need to be shown in the usage/help default template under 'available commands'. 
+func (c *Command) HasAvailableSubCommands() bool { + // return true on the first found available (non deprecated/help/hidden) + // sub command + for _, sub := range c.commands { + if sub.IsAvailableCommand() { + return true + } + } + + // the command either has no sub commands, or no available (non deprecated/help/hidden) + // sub commands + return false +} + +// HasParent determines if the command is a child command. +func (c *Command) HasParent() bool { + return c.parent != nil +} + +// GlobalNormalizationFunc returns the global normalization function or nil if it doesn't exist. +func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName { + return c.globNormFunc +} + +// Flags returns the complete FlagSet that applies +// to this command (local and persistent declared here and by all parents). +func (c *Command) Flags() *flag.FlagSet { + if c.flags == nil { + c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.flags.SetOutput(c.flagErrorBuf) + } + + return c.flags +} + +// LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands. +func (c *Command) LocalNonPersistentFlags() *flag.FlagSet { + persistentFlags := c.PersistentFlags() + + out := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.LocalFlags().VisitAll(func(f *flag.Flag) { + if persistentFlags.Lookup(f.Name) == nil { + out.AddFlag(f) + } + }) + return out +} + +// LocalFlags returns the local FlagSet specifically set in the current command. 
+func (c *Command) LocalFlags() *flag.FlagSet { + c.mergePersistentFlags() + + if c.lflags == nil { + c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.lflags.SetOutput(c.flagErrorBuf) + } + c.lflags.SortFlags = c.Flags().SortFlags + if c.globNormFunc != nil { + c.lflags.SetNormalizeFunc(c.globNormFunc) + } + + addToLocal := func(f *flag.Flag) { + if c.lflags.Lookup(f.Name) == nil && c.parentsPflags.Lookup(f.Name) == nil { + c.lflags.AddFlag(f) + } + } + c.Flags().VisitAll(addToLocal) + c.PersistentFlags().VisitAll(addToLocal) + return c.lflags +} + +// InheritedFlags returns all flags which were inherited from parents commands. +func (c *Command) InheritedFlags() *flag.FlagSet { + c.mergePersistentFlags() + + if c.iflags == nil { + c.iflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.iflags.SetOutput(c.flagErrorBuf) + } + + local := c.LocalFlags() + if c.globNormFunc != nil { + c.iflags.SetNormalizeFunc(c.globNormFunc) + } + + c.parentsPflags.VisitAll(func(f *flag.Flag) { + if c.iflags.Lookup(f.Name) == nil && local.Lookup(f.Name) == nil { + c.iflags.AddFlag(f) + } + }) + return c.iflags +} + +// NonInheritedFlags returns all flags which were not inherited from parent commands. +func (c *Command) NonInheritedFlags() *flag.FlagSet { + return c.LocalFlags() +} + +// PersistentFlags returns the persistent FlagSet specifically set in the current command. +func (c *Command) PersistentFlags() *flag.FlagSet { + if c.pflags == nil { + c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.pflags.SetOutput(c.flagErrorBuf) + } + return c.pflags +} + +// ResetFlags deletes all flags from command. 
+func (c *Command) ResetFlags() { + c.flagErrorBuf = new(bytes.Buffer) + c.flagErrorBuf.Reset() + c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.flags.SetOutput(c.flagErrorBuf) + c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.pflags.SetOutput(c.flagErrorBuf) + + c.lflags = nil + c.iflags = nil + c.parentsPflags = nil +} + +// HasFlags checks if the command contains any flags (local plus persistent from the entire structure). +func (c *Command) HasFlags() bool { + return c.Flags().HasFlags() +} + +// HasPersistentFlags checks if the command contains persistent flags. +func (c *Command) HasPersistentFlags() bool { + return c.PersistentFlags().HasFlags() +} + +// HasLocalFlags checks if the command has flags specifically declared locally. +func (c *Command) HasLocalFlags() bool { + return c.LocalFlags().HasFlags() +} + +// HasInheritedFlags checks if the command has flags inherited from its parent command. +func (c *Command) HasInheritedFlags() bool { + return c.InheritedFlags().HasFlags() +} + +// HasAvailableFlags checks if the command contains any flags (local plus persistent from the entire +// structure) which are not hidden or deprecated. +func (c *Command) HasAvailableFlags() bool { + return c.Flags().HasAvailableFlags() +} + +// HasAvailablePersistentFlags checks if the command contains persistent flags which are not hidden or deprecated. +func (c *Command) HasAvailablePersistentFlags() bool { + return c.PersistentFlags().HasAvailableFlags() +} + +// HasAvailableLocalFlags checks if the command has flags specifically declared locally which are not hidden +// or deprecated. +func (c *Command) HasAvailableLocalFlags() bool { + return c.LocalFlags().HasAvailableFlags() +} + +// HasAvailableInheritedFlags checks if the command has flags inherited from its parent command which are +// not hidden or deprecated. 
+func (c *Command) HasAvailableInheritedFlags() bool { + return c.InheritedFlags().HasAvailableFlags() +} + +// Flag climbs up the command tree looking for matching flag. +func (c *Command) Flag(name string) (flag *flag.Flag) { + flag = c.Flags().Lookup(name) + + if flag == nil { + flag = c.persistentFlag(name) + } + + return +} + +// Recursively find matching persistent flag. +func (c *Command) persistentFlag(name string) (flag *flag.Flag) { + if c.HasPersistentFlags() { + flag = c.PersistentFlags().Lookup(name) + } + + if flag == nil { + c.updateParentsPflags() + flag = c.parentsPflags.Lookup(name) + } + return +} + +// ParseFlags parses persistent flag tree and local flags. +func (c *Command) ParseFlags(args []string) error { + if c.DisableFlagParsing { + return nil + } + + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + beforeErrorBufLen := c.flagErrorBuf.Len() + c.mergePersistentFlags() + + //do it here after merging all flags and just before parse + c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist) + + err := c.Flags().Parse(args) + // Print warnings if they occurred (e.g. deprecated flag messages). + if c.flagErrorBuf.Len()-beforeErrorBufLen > 0 && err == nil { + c.Print(c.flagErrorBuf.String()) + } + + return err +} + +// Parent returns a commands parent command. +func (c *Command) Parent() *Command { + return c.parent +} + +// mergePersistentFlags merges c.PersistentFlags() to c.Flags() +// and adds missing persistent flags of all parents. +func (c *Command) mergePersistentFlags() { + c.updateParentsPflags() + c.Flags().AddFlagSet(c.PersistentFlags()) + c.Flags().AddFlagSet(c.parentsPflags) +} + +// updateParentsPflags updates c.parentsPflags by adding +// new persistent flags of all parents. +// If c.parentsPflags == nil, it makes new. 
+func (c *Command) updateParentsPflags() { + if c.parentsPflags == nil { + c.parentsPflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.parentsPflags.SetOutput(c.flagErrorBuf) + c.parentsPflags.SortFlags = false + } + + if c.globNormFunc != nil { + c.parentsPflags.SetNormalizeFunc(c.globNormFunc) + } + + c.Root().PersistentFlags().AddFlagSet(flag.CommandLine) + + c.VisitParents(func(parent *Command) { + c.parentsPflags.AddFlagSet(parent.PersistentFlags()) + }) +} diff --git a/vendor/github.com/spf13/cobra/command_notwin.go b/vendor/github.com/spf13/cobra/command_notwin.go new file mode 100644 index 0000000000..6159c1cc19 --- /dev/null +++ b/vendor/github.com/spf13/cobra/command_notwin.go @@ -0,0 +1,5 @@ +// +build !windows + +package cobra + +var preExecHookFn func(*Command) diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go new file mode 100644 index 0000000000..edec728e4f --- /dev/null +++ b/vendor/github.com/spf13/cobra/command_win.go @@ -0,0 +1,20 @@ +// +build windows + +package cobra + +import ( + "os" + "time" + + "github.com/inconshreveable/mousetrap" +) + +var preExecHookFn = preExecHook + +func preExecHook(c *Command) { + if MousetrapHelpText != "" && mousetrap.StartedByExplorer() { + c.Print(MousetrapHelpText) + time.Sleep(5 * time.Second) + os.Exit(1) + } +} diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go new file mode 100644 index 0000000000..889c22e273 --- /dev/null +++ b/vendor/github.com/spf13/cobra/zsh_completions.go @@ -0,0 +1,126 @@ +package cobra + +import ( + "bytes" + "fmt" + "io" + "os" + "strings" +) + +// GenZshCompletionFile generates zsh completion file. 
+func (c *Command) GenZshCompletionFile(filename string) error { + outFile, err := os.Create(filename) + if err != nil { + return err + } + defer outFile.Close() + + return c.GenZshCompletion(outFile) +} + +// GenZshCompletion generates a zsh completion file and writes to the passed writer. +func (c *Command) GenZshCompletion(w io.Writer) error { + buf := new(bytes.Buffer) + + writeHeader(buf, c) + maxDepth := maxDepth(c) + writeLevelMapping(buf, maxDepth) + writeLevelCases(buf, maxDepth, c) + + _, err := buf.WriteTo(w) + return err +} + +func writeHeader(w io.Writer, cmd *Command) { + fmt.Fprintf(w, "#compdef %s\n\n", cmd.Name()) +} + +func maxDepth(c *Command) int { + if len(c.Commands()) == 0 { + return 0 + } + maxDepthSub := 0 + for _, s := range c.Commands() { + subDepth := maxDepth(s) + if subDepth > maxDepthSub { + maxDepthSub = subDepth + } + } + return 1 + maxDepthSub +} + +func writeLevelMapping(w io.Writer, numLevels int) { + fmt.Fprintln(w, `_arguments \`) + for i := 1; i <= numLevels; i++ { + fmt.Fprintf(w, ` '%d: :->level%d' \`, i, i) + fmt.Fprintln(w) + } + fmt.Fprintf(w, ` '%d: :%s'`, numLevels+1, "_files") + fmt.Fprintln(w) +} + +func writeLevelCases(w io.Writer, maxDepth int, root *Command) { + fmt.Fprintln(w, "case $state in") + defer fmt.Fprintln(w, "esac") + + for i := 1; i <= maxDepth; i++ { + fmt.Fprintf(w, " level%d)\n", i) + writeLevel(w, root, i) + fmt.Fprintln(w, " ;;") + } + fmt.Fprintln(w, " *)") + fmt.Fprintln(w, " _arguments '*: :_files'") + fmt.Fprintln(w, " ;;") +} + +func writeLevel(w io.Writer, root *Command, i int) { + fmt.Fprintf(w, " case $words[%d] in\n", i) + defer fmt.Fprintln(w, " esac") + + commands := filterByLevel(root, i) + byParent := groupByParent(commands) + + for p, c := range byParent { + names := names(c) + fmt.Fprintf(w, " %s)\n", p) + fmt.Fprintf(w, " _arguments '%d: :(%s)'\n", i, strings.Join(names, " ")) + fmt.Fprintln(w, " ;;") + } + fmt.Fprintln(w, " *)") + fmt.Fprintln(w, " _arguments '*: :_files'") + 
fmt.Fprintln(w, " ;;") + +} + +func filterByLevel(c *Command, l int) []*Command { + cs := make([]*Command, 0) + if l == 0 { + cs = append(cs, c) + return cs + } + for _, s := range c.Commands() { + cs = append(cs, filterByLevel(s, l-1)...) + } + return cs +} + +func groupByParent(commands []*Command) map[string][]*Command { + m := make(map[string][]*Command) + for _, c := range commands { + parent := c.Parent() + if parent == nil { + continue + } + m[parent.Name()] = append(m[parent.Name()], c) + } + return m +} + +func names(commands []*Command) []string { + ns := make([]string, len(commands)) + for i, c := range commands { + ns[i] = c.Name() + } + return ns +} diff --git a/vendor/github.com/tv42/httpunix/.gitignore b/vendor/github.com/tv42/httpunix/.gitignore new file mode 100644 index 0000000000..9ed3b07cef --- /dev/null +++ b/vendor/github.com/tv42/httpunix/.gitignore @@ -0,0 +1 @@ +*.test diff --git a/vendor/github.com/tv42/httpunix/LICENSE b/vendor/github.com/tv42/httpunix/LICENSE new file mode 100644 index 0000000000..33aec14578 --- /dev/null +++ b/vendor/github.com/tv42/httpunix/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2013-2015 Tommi Virtanen. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/tv42/httpunix/httpunix.go b/vendor/github.com/tv42/httpunix/httpunix.go new file mode 100644 index 0000000000..95f5e95a81 --- /dev/null +++ b/vendor/github.com/tv42/httpunix/httpunix.go @@ -0,0 +1,95 @@ +// Package httpunix provides a HTTP transport (net/http.RoundTripper) +// that uses Unix domain sockets instead of HTTP. +// +// This is useful for non-browser connections within the same host, as +// it allows using the file system for credentials of both client +// and server, and guaranteeing unique names. +// +// The URLs look like this: +// +// http+unix://LOCATION/PATH_ETC +// +// where LOCATION is translated to a file system path with +// Transport.RegisterLocation, and PATH_ETC follow normal http: scheme +// conventions. +package httpunix + +import ( + "bufio" + "errors" + "net" + "net/http" + "sync" + "time" +) + +// Scheme is the URL scheme used for HTTP over UNIX domain sockets. +const Scheme = "http+unix" + +// Transport is a http.RoundTripper that connects to Unix domain +// sockets. +type Transport struct { + DialTimeout time.Duration + RequestTimeout time.Duration + ResponseHeaderTimeout time.Duration + + mu sync.Mutex + // map a URL "hostname" to a UNIX domain socket path + loc map[string]string +} + +// RegisterLocation registers an URL location and maps it to the given +// file system path. +// +// Calling RegisterLocation twice for the same location is a +// programmer error, and causes a panic. 
+func (t *Transport) RegisterLocation(loc string, path string) { + t.mu.Lock() + defer t.mu.Unlock() + if t.loc == nil { + t.loc = make(map[string]string) + } + if _, exists := t.loc[loc]; exists { + panic("location " + loc + " already registered") + } + t.loc[loc] = path +} + +var _ http.RoundTripper = (*Transport)(nil) + +// RoundTrip executes a single HTTP transaction. See +// net/http.RoundTripper. +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + if req.URL == nil { + return nil, errors.New("http+unix: nil Request.URL") + } + if req.URL.Scheme != Scheme { + return nil, errors.New("unsupported protocol scheme: " + req.URL.Scheme) + } + if req.URL.Host == "" { + return nil, errors.New("http+unix: no Host in request URL") + } + t.mu.Lock() + path, ok := t.loc[req.URL.Host] + t.mu.Unlock() + if !ok { + return nil, errors.New("unknown location: " + req.Host) + } + + c, err := net.DialTimeout("unix", path, t.DialTimeout) + if err != nil { + return nil, err + } + r := bufio.NewReader(c) + if t.RequestTimeout > 0 { + c.SetWriteDeadline(time.Now().Add(t.RequestTimeout)) + } + if err := req.Write(c); err != nil { + return nil, err + } + if t.ResponseHeaderTimeout > 0 { + c.SetReadDeadline(time.Now().Add(t.ResponseHeaderTimeout)) + } + resp, err := http.ReadResponse(r, req) + return resp, err +} diff --git a/vendor/google.golang.org/api/key.json.enc b/vendor/google.golang.org/api/key.json.enc index e69de29bb2..1286368010 100644 Binary files a/vendor/google.golang.org/api/key.json.enc and b/vendor/google.golang.org/api/key.json.enc differ diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme/scheme.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme/scheme.go new file mode 100644 index 0000000000..ab2574e828 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme/scheme.go @@ -0,0 +1,129 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unstructuredscheme + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/runtime/serializer/json" + "k8s.io/apimachinery/pkg/runtime/serializer/versioning" +) + +var ( + scheme = runtime.NewScheme() + codecs = serializer.NewCodecFactory(scheme) +) + +// NewUnstructuredNegotiatedSerializer returns a simple, negotiated serializer +func NewUnstructuredNegotiatedSerializer() runtime.NegotiatedSerializer { + return unstructuredNegotiatedSerializer{ + scheme: scheme, + typer: NewUnstructuredObjectTyper(), + creator: NewUnstructuredCreator(), + } +} + +type unstructuredNegotiatedSerializer struct { + scheme *runtime.Scheme + typer runtime.ObjectTyper + creator runtime.ObjectCreater +} + +func (s unstructuredNegotiatedSerializer) SupportedMediaTypes() []runtime.SerializerInfo { + return []runtime.SerializerInfo{ + { + MediaType: "application/json", + EncodesAsText: true, + Serializer: json.NewSerializer(json.DefaultMetaFactory, s.creator, s.typer, false), + PrettySerializer: json.NewSerializer(json.DefaultMetaFactory, s.creator, s.typer, true), + StreamSerializer: &runtime.StreamSerializerInfo{ + EncodesAsText: true, + Serializer: json.NewSerializer(json.DefaultMetaFactory, s.creator, s.typer, false), + Framer: json.Framer, + }, + }, + { + MediaType: 
"application/yaml", + EncodesAsText: true, + Serializer: json.NewYAMLSerializer(json.DefaultMetaFactory, s.creator, s.typer), + }, + } +} + +func (s unstructuredNegotiatedSerializer) EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder { + return versioning.NewDefaultingCodecForScheme(s.scheme, encoder, nil, gv, nil) +} + +func (s unstructuredNegotiatedSerializer) DecoderToVersion(decoder runtime.Decoder, gv runtime.GroupVersioner) runtime.Decoder { + return versioning.NewDefaultingCodecForScheme(s.scheme, nil, decoder, nil, gv) +} + +type unstructuredObjectTyper struct { +} + +// NewUnstructuredObjectTyper returns an object typer that can deal with unstructured things +func NewUnstructuredObjectTyper() runtime.ObjectTyper { + return unstructuredObjectTyper{} +} + +func (t unstructuredObjectTyper) ObjectKinds(obj runtime.Object) ([]schema.GroupVersionKind, bool, error) { + // Delegate for things other than Unstructured. + if _, ok := obj.(runtime.Unstructured); !ok { + return nil, false, fmt.Errorf("cannot type %T", obj) + } + gvk := obj.GetObjectKind().GroupVersionKind() + if len(gvk.Kind) == 0 { + return nil, false, runtime.NewMissingKindErr("object has no kind field ") + } + if len(gvk.Version) == 0 { + return nil, false, runtime.NewMissingVersionErr("object has no apiVersion field") + } + + return []schema.GroupVersionKind{obj.GetObjectKind().GroupVersionKind()}, false, nil +} + +func (t unstructuredObjectTyper) Recognizes(gvk schema.GroupVersionKind) bool { + return true +} + +type unstructuredCreator struct{} + +// NewUnstructuredCreator returns a simple object creator that always returns an unstructured +func NewUnstructuredCreator() runtime.ObjectCreater { + return unstructuredCreator{} +} + +func (c unstructuredCreator) New(kind schema.GroupVersionKind) (runtime.Object, error) { + ret := &unstructured.Unstructured{} + ret.SetGroupVersionKind(kind) + return ret, nil +} + +type unstructuredDefaulter struct { +} + +// 
NewUnstructuredDefaulter returns defaulter suitable for unstructured types that doesn't default anything +func NewUnstructuredDefaulter() runtime.ObjectDefaulter { + return unstructuredDefaulter{} +} + +func (d unstructuredDefaulter) Default(in runtime.Object) { +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go index 91fd4ed4f0..a60a7c0415 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go @@ -64,7 +64,7 @@ func NewDecoder(r io.ReadCloser, d runtime.Decoder) Decoder { reader: r, decoder: d, buf: make([]byte, 1024), - maxBytes: 1024 * 1024, + maxBytes: 16 * 1024 * 1024, } } diff --git a/vendor/k8s.io/cli-runtime/CONTRIBUTING.md b/vendor/k8s.io/cli-runtime/CONTRIBUTING.md new file mode 100644 index 0000000000..025a70e30f --- /dev/null +++ b/vendor/k8s.io/cli-runtime/CONTRIBUTING.md @@ -0,0 +1,7 @@ +# Contributing guidelines + +Do not open pull requests directly against this repository, they will be ignored. Instead, please open pull requests against [kubernetes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes. + +This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/cli-runtime](https://git.k8s.io/kubernetes/staging/src/k8s.io/cli-runtime) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot). 
+ +Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/staging.md) for more information diff --git a/vendor/k8s.io/cli-runtime/LICENSE b/vendor/k8s.io/cli-runtime/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/cli-runtime/OWNERS b/vendor/k8s.io/cli-runtime/OWNERS new file mode 100644 index 0000000000..27bd34cf6d --- /dev/null +++ b/vendor/k8s.io/cli-runtime/OWNERS @@ -0,0 +1,14 @@ +approvers: +- deads2k +- pwittrock +- seans3 +- soltysh +- juanvallejo +reviewers: +- deads2k +- juanvallejo +- pwittrock +- seans3 +- soltysh +labels: +- sig/cli diff --git a/vendor/k8s.io/cli-runtime/README.md b/vendor/k8s.io/cli-runtime/README.md new file mode 100644 index 0000000000..47ae2037ff --- /dev/null +++ b/vendor/k8s.io/cli-runtime/README.md @@ -0,0 +1,30 @@ +# cli-runtime + +Set of helpers for creating kubectl commands, as well as kubectl plugins. + + +## Purpose + +This library is a shared dependency for clients to work with Kubernetes API infrastructure which allows +to maintain kubectl compatible behavior. Its first consumer is `k8s.io/kubectl`. + + +## Compatibility + +There are *NO compatibility guarantees* for this repository. It is in direct support of Kubernetes, so branches +will track Kubernetes and be compatible with that repo. As we more cleanly separate the layers, we will review the +compatibility guarantee. + + +## Where does it come from? + +`cli-runtime` is synced from https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/cli-runtime. +Code changes are made in that location, merged into `k8s.io/kubernetes` and later synced here. 
+
+
+## Things you should *NOT* do
+
+ 1. Add API types to this repo. This is for the helpers, not for the types.
+ 2. Directly modify any files under `pkg` in this repo. Those are driven from `k8s.io/kubernetes/staging/src/k8s.io/cli-runtime`.
+ 3. Expect compatibility. This repo is direct support of Kubernetes and the API isn't yet stable enough for API guarantees.
+ 4. Add any type that only makes sense for `kubectl`.
diff --git a/vendor/k8s.io/cli-runtime/SECURITY_CONTACTS b/vendor/k8s.io/cli-runtime/SECURITY_CONTACTS
new file mode 100644
index 0000000000..0648a8ebff
--- /dev/null
+++ b/vendor/k8s.io/cli-runtime/SECURITY_CONTACTS
@@ -0,0 +1,17 @@
+# Defined below are the security contacts for this repo.
+#
+# They are the contact point for the Product Security Team to reach out
+# to for triaging and handling of incoming issues.
+#
+# The below names agree to abide by the
+# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy)
+# and will be removed and replaced if they violate that agreement.
+# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +cjcullen +jessfraz +liggitt +philips +tallclair diff --git a/vendor/k8s.io/cli-runtime/code-of-conduct.md b/vendor/k8s.io/cli-runtime/code-of-conduct.md new file mode 100644 index 0000000000..0d15c00cf3 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags.go new file mode 100644 index 0000000000..f6686edd49 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags.go @@ -0,0 +1,234 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package genericclioptions + +import ( + "github.com/spf13/pflag" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/cli-runtime/pkg/genericclioptions/resource" +) + +// ResourceBuilderFlags are flags for finding resources +// TODO(juanvallejo): wire --local flag from commands through +type ResourceBuilderFlags struct { + FileNameFlags *FileNameFlags + + LabelSelector *string + FieldSelector *string + AllNamespaces *bool + All *bool + Local *bool + IncludeUninitialized *bool + + Scheme *runtime.Scheme + Latest bool + StopOnFirstError bool +} + +// NewResourceBuilderFlags returns a default ResourceBuilderFlags +func NewResourceBuilderFlags() *ResourceBuilderFlags { + filenames := []string{} + + return &ResourceBuilderFlags{ + FileNameFlags: &FileNameFlags{ + Usage: "identifying the resource.", + Filenames: &filenames, + Recursive: boolPtr(true), + }, + } +} + +func (o *ResourceBuilderFlags) WithFile(recurse bool, files ...string) *ResourceBuilderFlags { + o.FileNameFlags = &FileNameFlags{ + Usage: "identifying the resource.", + Filenames: &files, + Recursive: boolPtr(recurse), + } + + return o +} + +func (o *ResourceBuilderFlags) WithLabelSelector(selector string) *ResourceBuilderFlags { + o.LabelSelector = &selector + return o +} + +func (o *ResourceBuilderFlags) WithFieldSelector(selector string) *ResourceBuilderFlags { + o.FieldSelector = &selector + return o +} + +func (o *ResourceBuilderFlags) WithAllNamespaces(defaultVal bool) *ResourceBuilderFlags { + o.AllNamespaces = &defaultVal + return o +} + +func (o *ResourceBuilderFlags) WithAll(defaultVal bool) *ResourceBuilderFlags { + o.All = &defaultVal + return o +} + +func (o *ResourceBuilderFlags) WithLocal(defaultVal bool) *ResourceBuilderFlags { + o.Local = &defaultVal + return o +} + +// WithUninitialized is using an alpha feature and may be dropped +func (o *ResourceBuilderFlags) WithUninitialized(defaultVal bool) *ResourceBuilderFlags { + o.IncludeUninitialized = &defaultVal + return o +} + +func 
(o *ResourceBuilderFlags) WithScheme(scheme *runtime.Scheme) *ResourceBuilderFlags { + o.Scheme = scheme + return o +} + +func (o *ResourceBuilderFlags) WithLatest() *ResourceBuilderFlags { + o.Latest = true + return o +} + +func (o *ResourceBuilderFlags) StopOnError() *ResourceBuilderFlags { + o.StopOnFirstError = true + return o +} + +// AddFlags registers flags for finding resources +func (o *ResourceBuilderFlags) AddFlags(flagset *pflag.FlagSet) { + o.FileNameFlags.AddFlags(flagset) + + if o.LabelSelector != nil { + flagset.StringVarP(o.LabelSelector, "selector", "l", *o.LabelSelector, "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)") + } + if o.FieldSelector != nil { + flagset.StringVar(o.FieldSelector, "field-selector", *o.FieldSelector, "Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.") + } + if o.AllNamespaces != nil { + flagset.BoolVar(o.AllNamespaces, "all-namespaces", *o.AllNamespaces, "If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace.") + } + if o.All != nil { + flagset.BoolVar(o.All, "all", *o.All, "Select all resources in the namespace of the specified resource types") + } + if o.Local != nil { + flagset.BoolVar(o.Local, "local", *o.Local, "If true, annotation will NOT contact api-server but run locally.") + } + if o.IncludeUninitialized != nil { + flagset.BoolVar(o.IncludeUninitialized, "include-uninitialized", *o.IncludeUninitialized, `If true, the kubectl command applies to uninitialized objects. If explicitly set to false, this flag overrides other flags that make the kubectl commands apply to uninitialized objects, e.g., "--all". 
Objects with empty metadata.initializers are regarded as initialized.`) + } +} + +// ToBuilder gives you back a resource finder to visit resources that are located +func (o *ResourceBuilderFlags) ToBuilder(restClientGetter RESTClientGetter, resources []string) ResourceFinder { + namespace, enforceNamespace, namespaceErr := restClientGetter.ToRawKubeConfigLoader().Namespace() + + builder := resource.NewBuilder(restClientGetter). + NamespaceParam(namespace).DefaultNamespace() + + if o.Scheme != nil { + builder.WithScheme(o.Scheme, o.Scheme.PrioritizedVersionsAllGroups()...) + } else { + builder.Unstructured() + } + + if o.FileNameFlags != nil { + opts := o.FileNameFlags.ToOptions() + builder.FilenameParam(enforceNamespace, &opts) + } + + if o.Local == nil || !*o.Local { + // resource type/name tuples only work non-local + if o.All != nil { + builder.ResourceTypeOrNameArgs(*o.All, resources...) + } else { + builder.ResourceTypeOrNameArgs(false, resources...) + } + // label selectors only work non-local (for now) + if o.LabelSelector != nil { + builder.LabelSelectorParam(*o.LabelSelector) + } + // field selectors only work non-local (forever) + if o.FieldSelector != nil { + builder.FieldSelectorParam(*o.FieldSelector) + } + // latest only works non-local (forever) + if o.Latest { + builder.Latest() + } + + } else { + builder.Local() + + if len(resources) > 0 { + builder.AddError(resource.LocalResourceError) + } + } + + if o.IncludeUninitialized != nil { + builder.IncludeUninitialized(*o.IncludeUninitialized) + } + + if !o.StopOnFirstError { + builder.ContinueOnError() + } + + return &ResourceFindBuilderWrapper{ + builder: builder. + Flatten(). 
// I think we're going to recommend this everywhere + AddError(namespaceErr), + } +} + +// ResourceFindBuilderWrapper wraps a builder in an interface +type ResourceFindBuilderWrapper struct { + builder *resource.Builder +} + +// Do finds you resources to check +func (b *ResourceFindBuilderWrapper) Do() resource.Visitor { + return b.builder.Do() +} + +// ResourceFinder allows mocking the resource builder +// TODO resource builders needs to become more interfacey +type ResourceFinder interface { + Do() resource.Visitor +} + +// ResourceFinderFunc is a handy way to make a ResourceFinder +type ResourceFinderFunc func() resource.Visitor + +// Do implements ResourceFinder +func (fn ResourceFinderFunc) Do() resource.Visitor { + return fn() +} + +// ResourceFinderForResult skins a visitor for re-use as a ResourceFinder +func ResourceFinderForResult(result resource.Visitor) ResourceFinder { + return ResourceFinderFunc(func() resource.Visitor { + return result + }) +} + +func strPtr(val string) *string { + return &val +} + +func boolPtr(val bool) *bool { + return &val +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags_fake.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags_fake.go new file mode 100644 index 0000000000..ca87d1e4c5 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/builder_flags_fake.go @@ -0,0 +1,54 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package genericclioptions + +import ( + "k8s.io/cli-runtime/pkg/genericclioptions/resource" +) + +// NewSimpleResourceFinder builds a super simple ResourceFinder that just iterates over the objects you provided +func NewSimpleFakeResourceFinder(infos ...*resource.Info) ResourceFinder { + return &fakeResourceFinder{ + Infos: infos, + } +} + +type fakeResourceFinder struct { + Infos []*resource.Info +} + +// Do implements the interface +func (f *fakeResourceFinder) Do() resource.Visitor { + return &fakeResourceResult{ + Infos: f.Infos, + } +} + +type fakeResourceResult struct { + Infos []*resource.Info +} + +// Visit just iterates over info +func (r *fakeResourceResult) Visit(fn resource.VisitorFunc) error { + for _, info := range r.Infos { + err := fn(info, nil) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go new file mode 100644 index 0000000000..e32e1eee77 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go @@ -0,0 +1,326 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package genericclioptions + +import ( + "os" + "path/filepath" + "regexp" + "strings" + "time" + + "github.com/spf13/pflag" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/util/homedir" +) + +const ( + flagClusterName = "cluster" + flagAuthInfoName = "user" + flagContext = "context" + flagNamespace = "namespace" + flagAPIServer = "server" + flagInsecure = "insecure-skip-tls-verify" + flagCertFile = "client-certificate" + flagKeyFile = "client-key" + flagCAFile = "certificate-authority" + flagBearerToken = "token" + flagImpersonate = "as" + flagImpersonateGroup = "as-group" + flagUsername = "username" + flagPassword = "password" + flagTimeout = "request-timeout" + flagHTTPCacheDir = "cache-dir" +) + +var defaultCacheDir = filepath.Join(homedir.HomeDir(), ".kube", "http-cache") + +// RESTClientGetter is an interface that the ConfigFlags describe to provide an easier way to mock for commands +// and eliminate the direct coupling to a struct type. Users may wish to duplicate this type in their own packages +// as per the golang type overlapping. 
+type RESTClientGetter interface { + // ToRESTConfig returns restconfig + ToRESTConfig() (*rest.Config, error) + // ToDiscoveryClient returns discovery client + ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) + // ToRESTMapper returns a restmapper + ToRESTMapper() (meta.RESTMapper, error) + // ToRawKubeConfigLoader return kubeconfig loader as-is + ToRawKubeConfigLoader() clientcmd.ClientConfig +} + +var _ RESTClientGetter = &ConfigFlags{} + +// ConfigFlags composes the set of values necessary +// for obtaining a REST client config +type ConfigFlags struct { + CacheDir *string + KubeConfig *string + + // config flags + ClusterName *string + AuthInfoName *string + Context *string + Namespace *string + APIServer *string + Insecure *bool + CertFile *string + KeyFile *string + CAFile *string + BearerToken *string + Impersonate *string + ImpersonateGroup *[]string + Username *string + Password *string + Timeout *string +} + +// ToRESTConfig implements RESTClientGetter. +// Returns a REST client configuration based on a provided path +// to a .kubeconfig file, loading rules, and config flag overrides. +// Expects the AddFlags method to have been called. +func (f *ConfigFlags) ToRESTConfig() (*rest.Config, error) { + return f.ToRawKubeConfigLoader().ClientConfig() +} + +// ToRawKubeConfigLoader binds config flag values to config overrides +// Returns an interactive clientConfig if the password flag is enabled, +// or a non-interactive clientConfig otherwise. 
+func (f *ConfigFlags) ToRawKubeConfigLoader() clientcmd.ClientConfig { + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + // use the standard defaults for this client command + // DEPRECATED: remove and replace with something more accurate + loadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig + + if f.KubeConfig != nil { + loadingRules.ExplicitPath = *f.KubeConfig + } + + overrides := &clientcmd.ConfigOverrides{ClusterDefaults: clientcmd.ClusterDefaults} + + // bind auth info flag values to overrides + if f.CertFile != nil { + overrides.AuthInfo.ClientCertificate = *f.CertFile + } + if f.KeyFile != nil { + overrides.AuthInfo.ClientKey = *f.KeyFile + } + if f.BearerToken != nil { + overrides.AuthInfo.Token = *f.BearerToken + } + if f.Impersonate != nil { + overrides.AuthInfo.Impersonate = *f.Impersonate + } + if f.ImpersonateGroup != nil { + overrides.AuthInfo.ImpersonateGroups = *f.ImpersonateGroup + } + if f.Username != nil { + overrides.AuthInfo.Username = *f.Username + } + if f.Password != nil { + overrides.AuthInfo.Password = *f.Password + } + + // bind cluster flags + if f.APIServer != nil { + overrides.ClusterInfo.Server = *f.APIServer + } + if f.CAFile != nil { + overrides.ClusterInfo.CertificateAuthority = *f.CAFile + } + if f.Insecure != nil { + overrides.ClusterInfo.InsecureSkipTLSVerify = *f.Insecure + } + + // bind context flags + if f.Context != nil { + overrides.CurrentContext = *f.Context + } + if f.ClusterName != nil { + overrides.Context.Cluster = *f.ClusterName + } + if f.AuthInfoName != nil { + overrides.Context.AuthInfo = *f.AuthInfoName + } + if f.Namespace != nil { + overrides.Context.Namespace = *f.Namespace + } + + if f.Timeout != nil { + overrides.Timeout = *f.Timeout + } + + var clientConfig clientcmd.ClientConfig + + // we only have an interactive prompt when a password is allowed + if f.Password == nil { + clientConfig = clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides) + } 
else { + clientConfig = clientcmd.NewInteractiveDeferredLoadingClientConfig(loadingRules, overrides, os.Stdin) + } + + return clientConfig +} + +// ToDiscoveryClient implements RESTClientGetter. +// Expects the AddFlags method to have been called. +// Returns a CachedDiscoveryInterface using a computed RESTConfig. +func (f *ConfigFlags) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) { + config, err := f.ToRESTConfig() + if err != nil { + return nil, err + } + + // The more groups you have, the more discovery requests you need to make. + // given 25 groups (our groups + a few custom resources) with one-ish version each, discovery needs to make 50 requests + // double it just so we don't end up here again for a while. This config is only used for discovery. + config.Burst = 100 + + // retrieve a user-provided value for the "cache-dir" + // defaulting to ~/.kube/http-cache if no user-value is given. + httpCacheDir := defaultCacheDir + if f.CacheDir != nil { + httpCacheDir = *f.CacheDir + } + + discoveryCacheDir := computeDiscoverCacheDir(filepath.Join(homedir.HomeDir(), ".kube", "cache", "discovery"), config.Host) + return discovery.NewCachedDiscoveryClientForConfig(config, discoveryCacheDir, httpCacheDir, time.Duration(10*time.Minute)) +} + +// ToRESTMapper returns a mapper. 
+func (f *ConfigFlags) ToRESTMapper() (meta.RESTMapper, error) { + discoveryClient, err := f.ToDiscoveryClient() + if err != nil { + return nil, err + } + + mapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient) + expander := restmapper.NewShortcutExpander(mapper, discoveryClient) + return expander, nil +} + +// AddFlags binds client configuration flags to a given flagset +func (f *ConfigFlags) AddFlags(flags *pflag.FlagSet) { + if f.KubeConfig != nil { + flags.StringVar(f.KubeConfig, "kubeconfig", *f.KubeConfig, "Path to the kubeconfig file to use for CLI requests.") + } + if f.CacheDir != nil { + flags.StringVar(f.CacheDir, flagHTTPCacheDir, *f.CacheDir, "Default HTTP cache directory") + } + + // add config options + if f.CertFile != nil { + flags.StringVar(f.CertFile, flagCertFile, *f.CertFile, "Path to a client certificate file for TLS") + } + if f.KeyFile != nil { + flags.StringVar(f.KeyFile, flagKeyFile, *f.KeyFile, "Path to a client key file for TLS") + } + if f.BearerToken != nil { + flags.StringVar(f.BearerToken, flagBearerToken, *f.BearerToken, "Bearer token for authentication to the API server") + } + if f.Impersonate != nil { + flags.StringVar(f.Impersonate, flagImpersonate, *f.Impersonate, "Username to impersonate for the operation") + } + if f.ImpersonateGroup != nil { + flags.StringArrayVar(f.ImpersonateGroup, flagImpersonateGroup, *f.ImpersonateGroup, "Group to impersonate for the operation, this flag can be repeated to specify multiple groups.") + } + if f.Username != nil { + flags.StringVar(f.Username, flagUsername, *f.Username, "Username for basic authentication to the API server") + } + if f.Password != nil { + flags.StringVar(f.Password, flagPassword, *f.Password, "Password for basic authentication to the API server") + } + if f.ClusterName != nil { + flags.StringVar(f.ClusterName, flagClusterName, *f.ClusterName, "The name of the kubeconfig cluster to use") + } + if f.AuthInfoName != nil { + flags.StringVar(f.AuthInfoName, 
flagAuthInfoName, *f.AuthInfoName, "The name of the kubeconfig user to use") + } + if f.Namespace != nil { + flags.StringVarP(f.Namespace, flagNamespace, "n", *f.Namespace, "If present, the namespace scope for this CLI request") + } + if f.Context != nil { + flags.StringVar(f.Context, flagContext, *f.Context, "The name of the kubeconfig context to use") + } + + if f.APIServer != nil { + flags.StringVarP(f.APIServer, flagAPIServer, "s", *f.APIServer, "The address and port of the Kubernetes API server") + } + if f.Insecure != nil { + flags.BoolVar(f.Insecure, flagInsecure, *f.Insecure, "If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure") + } + if f.CAFile != nil { + flags.StringVar(f.CAFile, flagCAFile, *f.CAFile, "Path to a cert file for the certificate authority") + } + if f.Timeout != nil { + flags.StringVar(f.Timeout, flagTimeout, *f.Timeout, "The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). 
A value of zero means don't timeout requests.") + } + +} + +// WithDeprecatedPasswordFlag enables the username and password config flags +func (f *ConfigFlags) WithDeprecatedPasswordFlag() *ConfigFlags { + f.Username = stringptr("") + f.Password = stringptr("") + return f +} + +// NewConfigFlags returns ConfigFlags with default values set +func NewConfigFlags() *ConfigFlags { + impersonateGroup := []string{} + insecure := false + + return &ConfigFlags{ + Insecure: &insecure, + Timeout: stringptr("0"), + KubeConfig: stringptr(""), + + CacheDir: stringptr(defaultCacheDir), + ClusterName: stringptr(""), + AuthInfoName: stringptr(""), + Context: stringptr(""), + Namespace: stringptr(""), + APIServer: stringptr(""), + CertFile: stringptr(""), + KeyFile: stringptr(""), + CAFile: stringptr(""), + BearerToken: stringptr(""), + Impersonate: stringptr(""), + ImpersonateGroup: &impersonateGroup, + } +} + +func stringptr(val string) *string { + return &val +} + +// overlyCautiousIllegalFileCharacters matches characters that *might* not be supported. Windows is really restrictive, so this is really restrictive +var overlyCautiousIllegalFileCharacters = regexp.MustCompile(`[^(\w/\.)]`) + +// computeDiscoverCacheDir takes the parentDir and the host and comes up with a "usually non-colliding" name. +func computeDiscoverCacheDir(parentDir, host string) string { + // strip the optional scheme from host if its there: + schemelessHost := strings.Replace(strings.Replace(host, "https://", "", 1), "http://", "", 1) + // now do a simple collapse of non-AZ09 characters. Collisions are possible but unlikely. 
Even if we do collide the problem is short lived + safeHost := overlyCautiousIllegalFileCharacters.ReplaceAllString(schemelessHost, "_") + return filepath.Join(parentDir, safeHost) +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags_fake.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags_fake.go new file mode 100644 index 0000000000..64e9a68833 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/config_flags_fake.go @@ -0,0 +1,110 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package genericclioptions + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +type TestConfigFlags struct { + clientConfig clientcmd.ClientConfig + discoveryClient discovery.CachedDiscoveryInterface + restMapper meta.RESTMapper +} + +func (f *TestConfigFlags) ToRawKubeConfigLoader() clientcmd.ClientConfig { + if f.clientConfig == nil { + panic("attempt to obtain a test RawKubeConfigLoader with no clientConfig specified") + } + return f.clientConfig +} + +func (f *TestConfigFlags) ToRESTConfig() (*rest.Config, error) { + return f.ToRawKubeConfigLoader().ClientConfig() +} + +func (f *TestConfigFlags) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) { + return f.discoveryClient, nil +} + +func (f *TestConfigFlags) ToRESTMapper() (meta.RESTMapper, error) { + if f.restMapper != nil { + return f.restMapper, nil + } + if f.discoveryClient != nil { + mapper := restmapper.NewDeferredDiscoveryRESTMapper(f.discoveryClient) + expander := restmapper.NewShortcutExpander(mapper, f.discoveryClient) + return expander, nil + } + return nil, fmt.Errorf("no restmapper") +} + +func (f *TestConfigFlags) WithClientConfig(clientConfig clientcmd.ClientConfig) *TestConfigFlags { + f.clientConfig = clientConfig + return f +} + +func (f *TestConfigFlags) WithRESTMapper(mapper meta.RESTMapper) *TestConfigFlags { + f.restMapper = mapper + return f +} + +func (f *TestConfigFlags) WithDiscoveryClient(c discovery.CachedDiscoveryInterface) *TestConfigFlags { + f.discoveryClient = c + return f +} + +func (f *TestConfigFlags) WithNamespace(ns string) *TestConfigFlags { + if f.clientConfig == nil { + panic("attempt to obtain a test RawKubeConfigLoader with no clientConfig specified") + } + f.clientConfig = &namespacedClientConfig{ + delegate: f.clientConfig, + namespace: ns, + } + return f 
+} + +func NewTestConfigFlags() *TestConfigFlags { + return &TestConfigFlags{} +} + +type namespacedClientConfig struct { + delegate clientcmd.ClientConfig + namespace string +} + +func (c *namespacedClientConfig) Namespace() (string, bool, error) { + return c.namespace, false, nil +} + +func (c *namespacedClientConfig) RawConfig() (clientcmdapi.Config, error) { + return c.delegate.RawConfig() +} +func (c *namespacedClientConfig) ClientConfig() (*rest.Config, error) { + return c.delegate.ClientConfig() +} +func (c *namespacedClientConfig) ConfigAccess() clientcmd.ConfigAccess { + return c.delegate.ConfigAccess() +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/doc.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/doc.go new file mode 100644 index 0000000000..2bf32d2561 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package genericclioptions contains flags which can be added to you command, bound, completed, and produce +// useful helper functions. 
Nothing in this package can depend on kube/kube +package genericclioptions diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/filename_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/filename_flags.go new file mode 100644 index 0000000000..348a9c6368 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/filename_flags.go @@ -0,0 +1,71 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package genericclioptions + +import ( + "strings" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "k8s.io/cli-runtime/pkg/genericclioptions/resource" +) + +// Usage of this struct by itself is discouraged. +// These flags are composed by ResourceBuilderFlags +// which should be used instead. +type FileNameFlags struct { + Usage string + + Filenames *[]string + Recursive *bool +} + +func (o *FileNameFlags) ToOptions() resource.FilenameOptions { + options := resource.FilenameOptions{} + + if o == nil { + return options + } + + if o.Recursive != nil { + options.Recursive = *o.Recursive + } + if o.Filenames != nil { + options.Filenames = *o.Filenames + } + + return options +} + +func (o *FileNameFlags) AddFlags(flags *pflag.FlagSet) { + if o == nil { + return + } + + if o.Recursive != nil { + flags.BoolVarP(o.Recursive, "recursive", "R", *o.Recursive, "Process the directory used in -f, --filename recursively. 
Useful when you want to manage related manifests organized within the same directory.") + } + if o.Filenames != nil { + flags.StringSliceVarP(o.Filenames, "filename", "f", *o.Filenames, o.Usage) + annotations := make([]string, 0, len(resource.FileExtensions)) + for _, ext := range resource.FileExtensions { + annotations = append(annotations, strings.TrimLeft(ext, ".")) + } + flags.SetAnnotation("filename", cobra.BashCompFilenameExt, annotations) + } +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/io_options.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/io_options.go new file mode 100644 index 0000000000..4fc3a77b0c --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/io_options.go @@ -0,0 +1,57 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package genericclioptions + +import ( + "bytes" + "io" + "io/ioutil" +) + +// IOStreams provides the standard names for iostreams. This is useful for embedding and for unit testing. 
+// Inconsistent and different names make it hard to read and review code +type IOStreams struct { + // In think, os.Stdin + In io.Reader + // Out think, os.Stdout + Out io.Writer + // ErrOut think, os.Stderr + ErrOut io.Writer +} + +// NewTestIOStreams returns a valid IOStreams and in, out, errout buffers for unit tests +func NewTestIOStreams() (IOStreams, *bytes.Buffer, *bytes.Buffer, *bytes.Buffer) { + in := &bytes.Buffer{} + out := &bytes.Buffer{} + errOut := &bytes.Buffer{} + + return IOStreams{ + In: in, + Out: out, + ErrOut: errOut, + }, in, out, errOut +} + +// NewTestIOStreamsDiscard returns a valid IOStreams that just discards +func NewTestIOStreamsDiscard() IOStreams { + in := &bytes.Buffer{} + return IOStreams{ + In: in, + Out: ioutil.Discard, + ErrOut: ioutil.Discard, + } +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/json_yaml_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/json_yaml_flags.go new file mode 100644 index 0000000000..d9b5812737 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/json_yaml_flags.go @@ -0,0 +1,68 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package genericclioptions + +import ( + "strings" + + "github.com/spf13/cobra" + + "k8s.io/cli-runtime/pkg/genericclioptions/printers" +) + +func (f *JSONYamlPrintFlags) AllowedFormats() []string { + if f == nil { + return []string{} + } + return []string{"json", "yaml"} +} + +// JSONYamlPrintFlags provides default flags necessary for json/yaml printing. +// Given the following flag values, a printer can be requested that knows +// how to handle printing based on these values. +type JSONYamlPrintFlags struct { +} + +// ToPrinter receives an outputFormat and returns a printer capable of +// handling --output=(yaml|json) printing. +// Returns false if the specified outputFormat does not match a supported format. +// Supported Format types can be found in pkg/printers/printers.go +func (f *JSONYamlPrintFlags) ToPrinter(outputFormat string) (printers.ResourcePrinter, error) { + var printer printers.ResourcePrinter + + outputFormat = strings.ToLower(outputFormat) + switch outputFormat { + case "json": + printer = &printers.JSONPrinter{} + case "yaml": + printer = &printers.YAMLPrinter{} + default: + return nil, NoCompatiblePrinterError{OutputFormat: &outputFormat, AllowedFormats: f.AllowedFormats()} + } + + return printer, nil +} + +// AddFlags receives a *cobra.Command reference and binds +// flags related to JSON or Yaml printing to it +func (f *JSONYamlPrintFlags) AddFlags(c *cobra.Command) {} + +// NewJSONYamlPrintFlags returns flags associated with +// yaml or json printing, with default values set. +func NewJSONYamlPrintFlags() *JSONYamlPrintFlags { + return &JSONYamlPrintFlags{} +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/jsonpath_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/jsonpath_flags.go new file mode 100644 index 0000000000..8fc2227cf1 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/jsonpath_flags.go @@ -0,0 +1,130 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package genericclioptions + +import ( + "fmt" + "io/ioutil" + "sort" + "strings" + + "github.com/spf13/cobra" + + "k8s.io/cli-runtime/pkg/genericclioptions/printers" +) + +// templates are logically optional for specifying a format. +// this allows a user to specify a template format value +// as --output=jsonpath= +var jsonFormats = map[string]bool{ + "jsonpath": true, + "jsonpath-file": true, +} + +// JSONPathPrintFlags provides default flags necessary for template printing. +// Given the following flag values, a printer can be requested that knows +// how to handle printing based on these values. +type JSONPathPrintFlags struct { + // indicates if it is OK to ignore missing keys for rendering + // an output template. + AllowMissingKeys *bool + TemplateArgument *string +} + +func (f *JSONPathPrintFlags) AllowedFormats() []string { + formats := make([]string, 0, len(jsonFormats)) + for format := range jsonFormats { + formats = append(formats, format) + } + sort.Strings(formats) + return formats +} + +// ToPrinter receives an templateFormat and returns a printer capable of +// handling --template format printing. +// Returns false if the specified templateFormat does not match a template format. 
+func (f *JSONPathPrintFlags) ToPrinter(templateFormat string) (printers.ResourcePrinter, error) { + if (f.TemplateArgument == nil || len(*f.TemplateArgument) == 0) && len(templateFormat) == 0 { + return nil, NoCompatiblePrinterError{Options: f, OutputFormat: &templateFormat} + } + + templateValue := "" + + if f.TemplateArgument == nil || len(*f.TemplateArgument) == 0 { + for format := range jsonFormats { + format = format + "=" + if strings.HasPrefix(templateFormat, format) { + templateValue = templateFormat[len(format):] + templateFormat = format[:len(format)-1] + break + } + } + } else { + templateValue = *f.TemplateArgument + } + + if _, supportedFormat := jsonFormats[templateFormat]; !supportedFormat { + return nil, NoCompatiblePrinterError{OutputFormat: &templateFormat, AllowedFormats: f.AllowedFormats()} + } + + if len(templateValue) == 0 { + return nil, fmt.Errorf("template format specified but no template given") + } + + if templateFormat == "jsonpath-file" { + data, err := ioutil.ReadFile(templateValue) + if err != nil { + return nil, fmt.Errorf("error reading --template %s, %v\n", templateValue, err) + } + + templateValue = string(data) + } + + p, err := printers.NewJSONPathPrinter(templateValue) + if err != nil { + return nil, fmt.Errorf("error parsing jsonpath %s, %v\n", templateValue, err) + } + + allowMissingKeys := true + if f.AllowMissingKeys != nil { + allowMissingKeys = *f.AllowMissingKeys + } + + p.AllowMissingKeys(allowMissingKeys) + return p, nil +} + +// AddFlags receives a *cobra.Command reference and binds +// flags related to template printing to it +func (f *JSONPathPrintFlags) AddFlags(c *cobra.Command) { + if f.TemplateArgument != nil { + c.Flags().StringVar(f.TemplateArgument, "template", *f.TemplateArgument, "Template string or path to template file to use when --output=jsonpath, --output=jsonpath-file.") + c.MarkFlagFilename("template") + } + if f.AllowMissingKeys != nil { + c.Flags().BoolVar(f.AllowMissingKeys, 
"allow-missing-template-keys", *f.AllowMissingKeys, "If true, ignore any errors in templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats.") + } +} + +// NewJSONPathPrintFlags returns flags associated with +// --template printing, with default values set. +func NewJSONPathPrintFlags(templateValue string, allowMissingKeys bool) *JSONPathPrintFlags { + return &JSONPathPrintFlags{ + TemplateArgument: &templateValue, + AllowMissingKeys: &allowMissingKeys, + } +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/kube_template_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/kube_template_flags.go new file mode 100644 index 0000000000..d35384c2ca --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/kube_template_flags.go @@ -0,0 +1,89 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package genericclioptions + +import ( + "github.com/spf13/cobra" + + "k8s.io/cli-runtime/pkg/genericclioptions/printers" +) + +// KubeTemplatePrintFlags composes print flags that provide both a JSONPath and a go-template printer. +// This is necessary if dealing with cases that require support both both printers, since both sets of flags +// require overlapping flags. 
+type KubeTemplatePrintFlags struct { + GoTemplatePrintFlags *GoTemplatePrintFlags + JSONPathPrintFlags *JSONPathPrintFlags + + AllowMissingKeys *bool + TemplateArgument *string +} + +func (f *KubeTemplatePrintFlags) AllowedFormats() []string { + if f == nil { + return []string{} + } + return append(f.GoTemplatePrintFlags.AllowedFormats(), f.JSONPathPrintFlags.AllowedFormats()...) +} + +func (f *KubeTemplatePrintFlags) ToPrinter(outputFormat string) (printers.ResourcePrinter, error) { + if f == nil { + return nil, NoCompatiblePrinterError{} + } + + if p, err := f.JSONPathPrintFlags.ToPrinter(outputFormat); !IsNoCompatiblePrinterError(err) { + return p, err + } + return f.GoTemplatePrintFlags.ToPrinter(outputFormat) +} + +// AddFlags receives a *cobra.Command reference and binds +// flags related to template printing to it +func (f *KubeTemplatePrintFlags) AddFlags(c *cobra.Command) { + if f == nil { + return + } + + if f.TemplateArgument != nil { + c.Flags().StringVar(f.TemplateArgument, "template", *f.TemplateArgument, "Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].") + c.MarkFlagFilename("template") + } + if f.AllowMissingKeys != nil { + c.Flags().BoolVar(f.AllowMissingKeys, "allow-missing-template-keys", *f.AllowMissingKeys, "If true, ignore any errors in templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats.") + } +} + +// NewKubeTemplatePrintFlags returns flags associated with +// --template printing, with default values set. 
+func NewKubeTemplatePrintFlags() *KubeTemplatePrintFlags { + allowMissingKeysPtr := true + templateArgPtr := "" + + return &KubeTemplatePrintFlags{ + GoTemplatePrintFlags: &GoTemplatePrintFlags{ + TemplateArgument: &templateArgPtr, + AllowMissingKeys: &allowMissingKeysPtr, + }, + JSONPathPrintFlags: &JSONPathPrintFlags{ + TemplateArgument: &templateArgPtr, + AllowMissingKeys: &allowMissingKeysPtr, + }, + + TemplateArgument: &templateArgPtr, + AllowMissingKeys: &allowMissingKeysPtr, + } +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/name_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/name_flags.go new file mode 100644 index 0000000000..3a1f4f3f51 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/name_flags.go @@ -0,0 +1,81 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package genericclioptions + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" + + "k8s.io/cli-runtime/pkg/genericclioptions/printers" +) + +// NamePrintFlags provides default flags necessary for printing +// a resource's fully-qualified Kind.group/name, or a successful +// message about that resource if an Operation is provided. +type NamePrintFlags struct { + // Operation describes the name of the action that + // took place on an object, to be included in the + // finalized "successful" message. 
+ Operation string +} + +func (f *NamePrintFlags) Complete(successTemplate string) error { + f.Operation = fmt.Sprintf(successTemplate, f.Operation) + return nil +} + +func (f *NamePrintFlags) AllowedFormats() []string { + if f == nil { + return []string{} + } + return []string{"name"} +} + +// ToPrinter receives an outputFormat and returns a printer capable of +// handling --output=name printing. +// Returns false if the specified outputFormat does not match a supported format. +// Supported format types can be found in pkg/printers/printers.go +func (f *NamePrintFlags) ToPrinter(outputFormat string) (printers.ResourcePrinter, error) { + namePrinter := &printers.NamePrinter{ + Operation: f.Operation, + } + + outputFormat = strings.ToLower(outputFormat) + switch outputFormat { + case "name": + namePrinter.ShortOutput = true + fallthrough + case "": + return namePrinter, nil + default: + return nil, NoCompatiblePrinterError{OutputFormat: &outputFormat, AllowedFormats: f.AllowedFormats()} + } +} + +// AddFlags receives a *cobra.Command reference and binds +// flags related to name printing to it +func (f *NamePrintFlags) AddFlags(c *cobra.Command) {} + +// NewNamePrintFlags returns flags associated with +// --name printing, with default values set. +func NewNamePrintFlags(operation string) *NamePrintFlags { + return &NamePrintFlags{ + Operation: operation, + } +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/print_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/print_flags.go new file mode 100644 index 0000000000..3f59dab4b9 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/print_flags.go @@ -0,0 +1,158 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package genericclioptions + +import ( + "fmt" + "sort" + "strings" + + "github.com/spf13/cobra" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/cli-runtime/pkg/genericclioptions/printers" +) + +type NoCompatiblePrinterError struct { + OutputFormat *string + AllowedFormats []string + Options interface{} +} + +func (e NoCompatiblePrinterError) Error() string { + output := "" + if e.OutputFormat != nil { + output = *e.OutputFormat + } + + sort.Strings(e.AllowedFormats) + return fmt.Sprintf("unable to match a printer suitable for the output format %q, allowed formats are: %s", output, strings.Join(e.AllowedFormats, ",")) +} + +func IsNoCompatiblePrinterError(err error) bool { + if err == nil { + return false + } + + _, ok := err.(NoCompatiblePrinterError) + return ok +} + +// PrintFlags composes common printer flag structs +// used across all commands, and provides a method +// of retrieving a known printer based on flag values provided. +type PrintFlags struct { + JSONYamlPrintFlags *JSONYamlPrintFlags + NamePrintFlags *NamePrintFlags + TemplatePrinterFlags *KubeTemplatePrintFlags + + TypeSetterPrinter *printers.TypeSetterPrinter + + OutputFormat *string + + // OutputFlagSpecified indicates whether the user specifically requested a certain kind of output. + // Using this function allows a sophisticated caller to change the flag binding logic if they so desire. 
+ OutputFlagSpecified func() bool +} + +func (f *PrintFlags) Complete(successTemplate string) error { + return f.NamePrintFlags.Complete(successTemplate) +} + +func (f *PrintFlags) AllowedFormats() []string { + ret := []string{} + ret = append(ret, f.JSONYamlPrintFlags.AllowedFormats()...) + ret = append(ret, f.NamePrintFlags.AllowedFormats()...) + ret = append(ret, f.TemplatePrinterFlags.AllowedFormats()...) + return ret +} + +func (f *PrintFlags) ToPrinter() (printers.ResourcePrinter, error) { + outputFormat := "" + if f.OutputFormat != nil { + outputFormat = *f.OutputFormat + } + // For backwards compatibility we want to support a --template argument given, even when no --output format is provided. + // If no explicit output format has been provided via the --output flag, fallback + // to honoring the --template argument. + templateFlagSpecified := f.TemplatePrinterFlags != nil && + f.TemplatePrinterFlags.TemplateArgument != nil && + len(*f.TemplatePrinterFlags.TemplateArgument) > 0 + outputFlagSpecified := f.OutputFlagSpecified != nil && f.OutputFlagSpecified() + if templateFlagSpecified && !outputFlagSpecified { + outputFormat = "go-template" + } + + if f.JSONYamlPrintFlags != nil { + if p, err := f.JSONYamlPrintFlags.ToPrinter(outputFormat); !IsNoCompatiblePrinterError(err) { + return f.TypeSetterPrinter.WrapToPrinter(p, err) + } + } + + if f.NamePrintFlags != nil { + if p, err := f.NamePrintFlags.ToPrinter(outputFormat); !IsNoCompatiblePrinterError(err) { + return f.TypeSetterPrinter.WrapToPrinter(p, err) + } + } + + if f.TemplatePrinterFlags != nil { + if p, err := f.TemplatePrinterFlags.ToPrinter(outputFormat); !IsNoCompatiblePrinterError(err) { + return f.TypeSetterPrinter.WrapToPrinter(p, err) + } + } + + return nil, NoCompatiblePrinterError{OutputFormat: f.OutputFormat, AllowedFormats: f.AllowedFormats()} +} + +func (f *PrintFlags) AddFlags(cmd *cobra.Command) { + f.JSONYamlPrintFlags.AddFlags(cmd) + f.NamePrintFlags.AddFlags(cmd) + 
f.TemplatePrinterFlags.AddFlags(cmd) + + if f.OutputFormat != nil { + cmd.Flags().StringVarP(f.OutputFormat, "output", "o", *f.OutputFormat, fmt.Sprintf("Output format. One of: %s.", strings.Join(f.AllowedFormats(), "|"))) + if f.OutputFlagSpecified == nil { + f.OutputFlagSpecified = func() bool { + return cmd.Flag("output").Changed + } + } + } +} + +// WithDefaultOutput sets a default output format if one is not provided through a flag value +func (f *PrintFlags) WithDefaultOutput(output string) *PrintFlags { + f.OutputFormat = &output + return f +} + +// WithTypeSetter sets a wrapper than will surround the returned printer with a printer to type resources +func (f *PrintFlags) WithTypeSetter(scheme *runtime.Scheme) *PrintFlags { + f.TypeSetterPrinter = printers.NewTypeSetter(scheme) + return f +} + +func NewPrintFlags(operation string) *PrintFlags { + outputFormat := "" + + return &PrintFlags{ + OutputFormat: &outputFormat, + + JSONYamlPrintFlags: NewJSONYamlPrintFlags(), + NamePrintFlags: NewNamePrintFlags(operation), + TemplatePrinterFlags: NewKubeTemplatePrintFlags(), + } +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/discard.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/discard.go new file mode 100644 index 0000000000..cd934976da --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/discard.go @@ -0,0 +1,30 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package printers + +import ( + "io" + + "k8s.io/apimachinery/pkg/runtime" +) + +// NewDiscardingPrinter is a printer that discards all objects +func NewDiscardingPrinter() ResourcePrinterFunc { + return ResourcePrinterFunc(func(runtime.Object, io.Writer) error { + return nil + }) +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/interface.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/interface.go new file mode 100644 index 0000000000..b59a935fca --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/interface.go @@ -0,0 +1,37 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package printers + +import ( + "io" + + "k8s.io/apimachinery/pkg/runtime" +) + +// ResourcePrinterFunc is a function that can print objects +type ResourcePrinterFunc func(runtime.Object, io.Writer) error + +// PrintObj implements ResourcePrinter +func (fn ResourcePrinterFunc) PrintObj(obj runtime.Object, w io.Writer) error { + return fn(obj, w) +} + +// ResourcePrinter is an interface that knows how to print runtime objects. +type ResourcePrinter interface { + // Print receives a runtime object, formats it and prints it to a writer. 
+ PrintObj(runtime.Object, io.Writer) error +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/json.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/json.go new file mode 100644 index 0000000000..bb5bec7488 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/json.go @@ -0,0 +1,102 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package printers + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "reflect" + + "k8s.io/apimachinery/pkg/runtime" + + "sigs.k8s.io/yaml" +) + +// JSONPrinter is an implementation of ResourcePrinter which outputs an object as JSON. +type JSONPrinter struct{} + +// PrintObj is an implementation of ResourcePrinter.PrintObj which simply writes the object to the Writer. +func (p *JSONPrinter) PrintObj(obj runtime.Object, w io.Writer) error { + // we use reflect.Indirect here in order to obtain the actual value from a pointer. + // we need an actual value in order to retrieve the package path for an object. + // using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers. 
+ if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { + return fmt.Errorf(InternalObjectPrinterErr) + } + + switch obj := obj.(type) { + case *runtime.Unknown: + var buf bytes.Buffer + err := json.Indent(&buf, obj.Raw, "", " ") + if err != nil { + return err + } + buf.WriteRune('\n') + _, err = buf.WriteTo(w) + return err + } + + if obj.GetObjectKind().GroupVersionKind().Empty() { + return fmt.Errorf("missing apiVersion or kind; try GetObjectKind().SetGroupVersionKind() if you know the type") + } + + data, err := json.MarshalIndent(obj, "", " ") + if err != nil { + return err + } + data = append(data, '\n') + _, err = w.Write(data) + return err +} + +// YAMLPrinter is an implementation of ResourcePrinter which outputs an object as YAML. +// The input object is assumed to be in the internal version of an API and is converted +// to the given version first. +type YAMLPrinter struct{} + +// PrintObj prints the data as YAML. +func (p *YAMLPrinter) PrintObj(obj runtime.Object, w io.Writer) error { + // we use reflect.Indirect here in order to obtain the actual value from a pointer. + // we need an actual value in order to retrieve the package path for an object. + // using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers. 
+ if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { + return fmt.Errorf(InternalObjectPrinterErr) + } + + switch obj := obj.(type) { + case *runtime.Unknown: + data, err := yaml.JSONToYAML(obj.Raw) + if err != nil { + return err + } + _, err = w.Write(data) + return err + } + + if obj.GetObjectKind().GroupVersionKind().Empty() { + return fmt.Errorf("missing apiVersion or kind; try GetObjectKind().SetGroupVersionKind() if you know the type") + } + + output, err := yaml.Marshal(obj) + if err != nil { + return err + } + _, err = fmt.Fprint(w, string(output)) + return err +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/jsonpath.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/jsonpath.go new file mode 100644 index 0000000000..333b9c3344 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/jsonpath.go @@ -0,0 +1,147 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package printers + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "reflect" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/util/jsonpath" +) + +// exists returns true if it would be possible to call the index function +// with these arguments. +// +// TODO: how to document this for users? +// +// index returns the result of indexing its first argument by the following +// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. 
Each +// indexed item must be a map, slice, or array. +func exists(item interface{}, indices ...interface{}) bool { + v := reflect.ValueOf(item) + for _, i := range indices { + index := reflect.ValueOf(i) + var isNil bool + if v, isNil = indirect(v); isNil { + return false + } + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.String: + var x int64 + switch index.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + x = index.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + x = int64(index.Uint()) + default: + return false + } + if x < 0 || x >= int64(v.Len()) { + return false + } + v = v.Index(int(x)) + case reflect.Map: + if !index.IsValid() { + index = reflect.Zero(v.Type().Key()) + } + if !index.Type().AssignableTo(v.Type().Key()) { + return false + } + if x := v.MapIndex(index); x.IsValid() { + v = x + } else { + v = reflect.Zero(v.Type().Elem()) + } + default: + return false + } + } + if _, isNil := indirect(v); isNil { + return false + } + return true +} + +// stolen from text/template +// indirect returns the item at the end of indirection, and a bool to indicate if it's nil. +// We indirect through pointers and empty interfaces (only) because +// non-empty interfaces have methods we might need. +func indirect(v reflect.Value) (rv reflect.Value, isNil bool) { + for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() { + if v.IsNil() { + return v, true + } + if v.Kind() == reflect.Interface && v.NumMethod() > 0 { + break + } + } + return v, false +} + +// JSONPathPrinter is an implementation of ResourcePrinter which formats data with jsonpath expression. 
+type JSONPathPrinter struct { + rawTemplate string + *jsonpath.JSONPath +} + +func NewJSONPathPrinter(tmpl string) (*JSONPathPrinter, error) { + j := jsonpath.New("out") + if err := j.Parse(tmpl); err != nil { + return nil, err + } + return &JSONPathPrinter{ + rawTemplate: tmpl, + JSONPath: j, + }, nil +} + +// PrintObj formats the obj with the JSONPath Template. +func (j *JSONPathPrinter) PrintObj(obj runtime.Object, w io.Writer) error { + // we use reflect.Indirect here in order to obtain the actual value from a pointer. + // we need an actual value in order to retrieve the package path for an object. + // using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers. + if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { + return fmt.Errorf(InternalObjectPrinterErr) + } + + var queryObj interface{} = obj + if unstructured, ok := obj.(runtime.Unstructured); ok { + queryObj = unstructured.UnstructuredContent() + } else { + data, err := json.Marshal(obj) + if err != nil { + return err + } + queryObj = map[string]interface{}{} + if err := json.Unmarshal(data, &queryObj); err != nil { + return err + } + } + + if err := j.JSONPath.Execute(w, queryObj); err != nil { + buf := bytes.NewBuffer(nil) + fmt.Fprintf(buf, "Error executing template: %v. 
Printing more information for debugging the template:\n", err) + fmt.Fprintf(buf, "\ttemplate was:\n\t\t%v\n", j.rawTemplate) + fmt.Fprintf(buf, "\tobject given to jsonpath engine was:\n\t\t%#v\n\n", queryObj) + return fmt.Errorf("error executing jsonpath %q: %v\n", j.rawTemplate, buf.String()) + } + return nil +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/name.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/name.go new file mode 100644 index 0000000000..d04c5c6bbc --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/name.go @@ -0,0 +1,124 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package printers + +import ( + "fmt" + "io" + "reflect" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// NamePrinter is an implementation of ResourcePrinter which outputs "resource/name" pair of an object. +type NamePrinter struct { + // ShortOutput indicates whether an operation should be + // printed along side the "resource/name" pair for an object. + ShortOutput bool + // Operation describes the name of the action that + // took place on an object, to be included in the + // finalized "successful" message. 
+ Operation string +} + +// PrintObj is an implementation of ResourcePrinter.PrintObj which decodes the object +// and print "resource/name" pair. If the object is a List, print all items in it. +func (p *NamePrinter) PrintObj(obj runtime.Object, w io.Writer) error { + // we use reflect.Indirect here in order to obtain the actual value from a pointer. + // using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers. + // we need an actual value in order to retrieve the package path for an object. + if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { + return fmt.Errorf(InternalObjectPrinterErr) + } + + if meta.IsListType(obj) { + // we allow unstructured lists for now because they always contain the GVK information. We should chase down + // callers and stop them from passing unflattened lists + // TODO chase the caller that is setting this and remove it. + if _, ok := obj.(*unstructured.UnstructuredList); !ok { + return fmt.Errorf("list types are not supported by name printing: %T", obj) + } + + items, err := meta.ExtractList(obj) + if err != nil { + return err + } + for _, obj := range items { + if err := p.PrintObj(obj, w); err != nil { + return err + } + } + return nil + } + + if obj.GetObjectKind().GroupVersionKind().Empty() { + return fmt.Errorf("missing apiVersion or kind; try GetObjectKind().SetGroupVersionKind() if you know the type") + } + + name := "" + if acc, err := meta.Accessor(obj); err == nil { + if n := acc.GetName(); len(n) > 0 { + name = n + } + } + + return printObj(w, name, p.Operation, p.ShortOutput, GetObjectGroupKind(obj)) +} + +func GetObjectGroupKind(obj runtime.Object) schema.GroupKind { + if obj == nil { + return schema.GroupKind{Kind: ""} + } + groupVersionKind := obj.GetObjectKind().GroupVersionKind() + if len(groupVersionKind.Kind) > 0 { + return groupVersionKind.GroupKind() + } + + if uns, ok := obj.(*unstructured.Unstructured); ok { + if 
len(uns.GroupVersionKind().Kind) > 0 { + return uns.GroupVersionKind().GroupKind() + } + } + + return schema.GroupKind{Kind: ""} +} + +func printObj(w io.Writer, name string, operation string, shortOutput bool, groupKind schema.GroupKind) error { + if len(groupKind.Kind) == 0 { + return fmt.Errorf("missing kind for resource with name %v", name) + } + + if len(operation) > 0 { + operation = " " + operation + } + + if shortOutput { + operation = "" + } + + if len(groupKind.Group) == 0 { + fmt.Fprintf(w, "%s/%s%s\n", strings.ToLower(groupKind.Kind), name, operation) + return nil + } + + fmt.Fprintf(w, "%s.%s/%s%s\n", strings.ToLower(groupKind.Kind), groupKind.Group, name, operation) + return nil +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/sourcechecker.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/sourcechecker.go new file mode 100644 index 0000000000..e360c8fe0b --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/sourcechecker.go @@ -0,0 +1,60 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package printers + +import ( + "strings" +) + +var ( + InternalObjectPrinterErr = "a versioned object must be passed to a printer" + + // disallowedPackagePrefixes contains regular expression templates + // for object package paths that are not allowed by printers. 
+ disallowedPackagePrefixes = []string{ + "k8s.io/kubernetes/pkg/apis/", + } +) + +var InternalObjectPreventer = &illegalPackageSourceChecker{disallowedPackagePrefixes} + +func IsInternalObjectError(err error) bool { + if err == nil { + return false + } + + return err.Error() == InternalObjectPrinterErr +} + +// illegalPackageSourceChecker compares a given +// object's package path, and determines if the +// object originates from a disallowed source. +type illegalPackageSourceChecker struct { + // disallowedPrefixes is a slice of disallowed package path + // prefixes for a given runtime.Object that we are printing. + disallowedPrefixes []string +} + +func (c *illegalPackageSourceChecker) IsForbidden(pkgPath string) bool { + for _, forbiddenPrefix := range c.disallowedPrefixes { + if strings.HasPrefix(pkgPath, forbiddenPrefix) || strings.Contains(pkgPath, "/vendor/"+forbiddenPrefix) { + return true + } + } + + return false +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/template.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/template.go new file mode 100644 index 0000000000..5dd807dad9 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/template.go @@ -0,0 +1,118 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package printers + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "reflect" + "text/template" + + "k8s.io/apimachinery/pkg/runtime" +) + +// GoTemplatePrinter is an implementation of ResourcePrinter which formats data with a Go Template. +type GoTemplatePrinter struct { + rawTemplate string + template *template.Template +} + +func NewGoTemplatePrinter(tmpl []byte) (*GoTemplatePrinter, error) { + t, err := template.New("output"). + Funcs(template.FuncMap{ + "exists": exists, + "base64decode": base64decode, + }). + Parse(string(tmpl)) + if err != nil { + return nil, err + } + return &GoTemplatePrinter{ + rawTemplate: string(tmpl), + template: t, + }, nil +} + +// AllowMissingKeys tells the template engine if missing keys are allowed. +func (p *GoTemplatePrinter) AllowMissingKeys(allow bool) { + if allow { + p.template.Option("missingkey=default") + } else { + p.template.Option("missingkey=error") + } +} + +// PrintObj formats the obj with the Go Template. +func (p *GoTemplatePrinter) PrintObj(obj runtime.Object, w io.Writer) error { + if InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { + return fmt.Errorf(InternalObjectPrinterErr) + } + + var data []byte + var err error + data, err = json.Marshal(obj) + if err != nil { + return err + } + + out := map[string]interface{}{} + if err := json.Unmarshal(data, &out); err != nil { + return err + } + if err = p.safeExecute(w, out); err != nil { + // It is way easier to debug this stuff when it shows up in + // stdout instead of just stdin. So in addition to returning + // a nice error, also print useful stuff with the writer. + fmt.Fprintf(w, "Error executing template: %v. 
Printing more information for debugging the template:\n", err) + fmt.Fprintf(w, "\ttemplate was:\n\t\t%v\n", p.rawTemplate) + fmt.Fprintf(w, "\traw data was:\n\t\t%v\n", string(data)) + fmt.Fprintf(w, "\tobject given to template engine was:\n\t\t%+v\n\n", out) + return fmt.Errorf("error executing template %q: %v", p.rawTemplate, err) + } + return nil +} + +// safeExecute tries to execute the template, but catches panics and returns an error +// should the template engine panic. +func (p *GoTemplatePrinter) safeExecute(w io.Writer, obj interface{}) error { + var panicErr error + // Sorry for the double anonymous function. There's probably a clever way + // to do this that has the defer'd func setting the value to be returned, but + // that would be even less obvious. + retErr := func() error { + defer func() { + if x := recover(); x != nil { + panicErr = fmt.Errorf("caught panic: %+v", x) + } + }() + return p.template.Execute(w, obj) + }() + if panicErr != nil { + return panicErr + } + return retErr +} + +func base64decode(v string) (string, error) { + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return "", fmt.Errorf("base64 decode failed: %v", err) + } + return string(data), nil +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/typesetter.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/typesetter.go new file mode 100644 index 0000000000..8d2d9b56ec --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/printers/typesetter.go @@ -0,0 +1,95 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package printers + +import ( + "fmt" + "io" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// TypeSetterPrinter is an implementation of ResourcePrinter wraps another printer with types set on the objects +type TypeSetterPrinter struct { + Delegate ResourcePrinter + + Typer runtime.ObjectTyper +} + +// NewTypeSetter constructs a wrapping printer with required params +func NewTypeSetter(typer runtime.ObjectTyper) *TypeSetterPrinter { + return &TypeSetterPrinter{Typer: typer} +} + +// PrintObj is an implementation of ResourcePrinter.PrintObj which sets type information on the obj for the duration +// of printing. It is NOT threadsafe. +func (p *TypeSetterPrinter) PrintObj(obj runtime.Object, w io.Writer) error { + if obj == nil { + return p.Delegate.PrintObj(obj, w) + } + if !obj.GetObjectKind().GroupVersionKind().Empty() { + return p.Delegate.PrintObj(obj, w) + } + + // we were empty coming in, make sure we're empty going out. 
This makes the call thread-unsafe + defer func() { + obj.GetObjectKind().SetGroupVersionKind(schema.GroupVersionKind{}) + }() + + gvks, _, err := p.Typer.ObjectKinds(obj) + if err != nil { + // printers wrapped by us expect to find the type information present + return fmt.Errorf("missing apiVersion or kind and cannot assign it; %v", err) + } + + for _, gvk := range gvks { + if len(gvk.Kind) == 0 { + continue + } + if len(gvk.Version) == 0 || gvk.Version == runtime.APIVersionInternal { + continue + } + obj.GetObjectKind().SetGroupVersionKind(gvk) + break + } + + return p.Delegate.PrintObj(obj, w) +} + +// ToPrinter returns a printer (not threadsafe!) that has been wrapped +func (p *TypeSetterPrinter) ToPrinter(delegate ResourcePrinter) ResourcePrinter { + if p == nil { + return delegate + } + + p.Delegate = delegate + return p +} + +// WrapToPrinter wraps the common ToPrinter method +func (p *TypeSetterPrinter) WrapToPrinter(delegate ResourcePrinter, err error) (ResourcePrinter, error) { + if err != nil { + return delegate, err + } + if p == nil { + return delegate, nil + } + + p.Delegate = delegate + return p, nil +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/record_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/record_flags.go new file mode 100644 index 0000000000..faf250d53c --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/record_flags.go @@ -0,0 +1,199 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package genericclioptions + +import ( + "os" + "path/filepath" + "strings" + + "github.com/evanphx/json-patch" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/json" +) + +// ChangeCauseAnnotation is the annotation indicating a guess at "why" something was changed +const ChangeCauseAnnotation = "kubernetes.io/change-cause" + +// RecordFlags contains all flags associated with the "--record" operation +type RecordFlags struct { + // Record indicates the state of the recording flag. It is a pointer so a caller can opt out or rebind + Record *bool + + changeCause string +} + +// ToRecorder returns a ChangeCause recorder if --record=false was not +// explicitly given by the user +func (f *RecordFlags) ToRecorder() (Recorder, error) { + if f == nil { + return NoopRecorder{}, nil + } + + shouldRecord := false + if f.Record != nil { + shouldRecord = *f.Record + } + + // if flag was explicitly set to false by the user, + // do not record + if !shouldRecord { + return NoopRecorder{}, nil + } + + return &ChangeCauseRecorder{ + changeCause: f.changeCause, + }, nil +} + +// Complete is called before the command is run, but after it is invoked to finish the state of the struct before use. 
+func (f *RecordFlags) Complete(cmd *cobra.Command) error { + if f == nil { + return nil + } + + f.changeCause = parseCommandArguments(cmd) + return nil +} + +func (f *RecordFlags) CompleteWithChangeCause(cause string) error { + if f == nil { + return nil + } + + f.changeCause = cause + return nil +} + +// AddFlags binds the requested flags to the provided flagset +// TODO have this only take a flagset +func (f *RecordFlags) AddFlags(cmd *cobra.Command) { + if f == nil { + return + } + + if f.Record != nil { + cmd.Flags().BoolVar(f.Record, "record", *f.Record, "Record current kubectl command in the resource annotation. If set to false, do not record the command. If set to true, record the command. If not set, default to updating the existing annotation value only if one already exists.") + } +} + +// NewRecordFlags provides a RecordFlags with reasonable default values set for use +func NewRecordFlags() *RecordFlags { + record := false + + return &RecordFlags{ + Record: &record, + } +} + +// Recorder is used to record why a runtime.Object was changed in an annotation. +type Recorder interface { + // Record records why a runtime.Object was changed in an annotation. + Record(runtime.Object) error + MakeRecordMergePatch(runtime.Object) ([]byte, error) +} + +// NoopRecorder does nothing. It is a "do nothing" that can be returned so code doesn't switch on it. +type NoopRecorder struct{} + +// Record implements Recorder +func (r NoopRecorder) Record(obj runtime.Object) error { + return nil +} + +// MakeRecordMergePatch implements Recorder +func (r NoopRecorder) MakeRecordMergePatch(obj runtime.Object) ([]byte, error) { + return nil, nil +} + +// ChangeCauseRecorder annotates a "change-cause" to an input runtime object +type ChangeCauseRecorder struct { + changeCause string +} + +// Record annotates a "change-cause" to a given info if either "shouldRecord" is true, +// or the resource info previously contained a "change-cause" annotation. 
+func (r *ChangeCauseRecorder) Record(obj runtime.Object) error { + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + annotations := accessor.GetAnnotations() + if annotations == nil { + annotations = make(map[string]string) + } + annotations[ChangeCauseAnnotation] = r.changeCause + accessor.SetAnnotations(annotations) + return nil +} + +// MakeRecordMergePatch produces a merge patch for updating the recording annotation. +func (r *ChangeCauseRecorder) MakeRecordMergePatch(obj runtime.Object) ([]byte, error) { + // copy so we don't mess with the original + objCopy := obj.DeepCopyObject() + if err := r.Record(objCopy); err != nil { + return nil, err + } + + oldData, err := json.Marshal(obj) + if err != nil { + return nil, err + } + newData, err := json.Marshal(objCopy) + if err != nil { + return nil, err + } + + return jsonpatch.CreateMergePatch(oldData, newData) +} + +// parseCommandArguments will stringify and return all environment arguments ie. a command run by a client +// using the factory. +// Set showSecrets false to filter out stuff like secrets. 
+func parseCommandArguments(cmd *cobra.Command) string { + if len(os.Args) == 0 { + return "" + } + + flags := "" + parseFunc := func(flag *pflag.Flag, value string) error { + flags = flags + " --" + flag.Name + if set, ok := flag.Annotations["classified"]; !ok || len(set) == 0 { + flags = flags + "=" + value + } else { + flags = flags + "=CLASSIFIED" + } + return nil + } + var err error + err = cmd.Flags().ParseAll(os.Args[1:], parseFunc) + if err != nil || !cmd.Flags().Parsed() { + return "" + } + + args := "" + if arguments := cmd.Flags().Args(); len(arguments) > 0 { + args = " " + strings.Join(arguments, " ") + } + + base := filepath.Base(os.Args[0]) + return base + args + flags +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/builder.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/builder.go new file mode 100644 index 0000000000..42f660a4e5 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/builder.go @@ -0,0 +1,1167 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resource + +import ( + "errors" + "fmt" + "io" + "net/url" + "os" + "strings" + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/restmapper" +) + +var FileExtensions = []string{".json", ".yaml", ".yml"} +var InputExtensions = append(FileExtensions, "stdin") + +const defaultHttpGetAttempts int = 3 + +// Builder provides convenience functions for taking arguments and parameters +// from the command line and converting them to a list of resources to iterate +// over using the Visitor interface. +type Builder struct { + categoryExpanderFn CategoryExpanderFunc + + // mapper is set explicitly by resource builders + mapper *mapper + + // clientConfigFn is a function to produce a client, *if* you need one + clientConfigFn ClientConfigFunc + + restMapperFn RESTMapperFunc + + // objectTyper is statically determinant per-command invocation based on your internal or unstructured choice + // it does not ever need to rely upon discovery. 
+ objectTyper runtime.ObjectTyper + + // codecFactory describes which codecs you want to use + negotiatedSerializer runtime.NegotiatedSerializer + + // local indicates that we cannot make server calls + local bool + + errs []error + + paths []Visitor + stream bool + dir bool + + labelSelector *string + fieldSelector *string + selectAll bool + includeUninitialized bool + limitChunks int64 + requestTransforms []RequestTransform + + resources []string + + namespace string + allNamespace bool + names []string + + resourceTuples []resourceTuple + + defaultNamespace bool + requireNamespace bool + + flatten bool + latest bool + + requireObject bool + + singleResourceType bool + continueOnError bool + + singleItemImplied bool + + export bool + + schema ContentValidator + + // fakeClientFn is used for testing + fakeClientFn FakeClientFunc +} + +var missingResourceError = fmt.Errorf(`You must provide one or more resources by argument or filename. +Example resource specifications include: + '-f rsrc.yaml' + '--filename=rsrc.json' + ' ' + ''`) + +var LocalResourceError = errors.New(`error: you must specify resources by --filename when --local is set. +Example resource specifications include: + '-f rsrc.yaml' + '--filename=rsrc.json'`) + +// TODO: expand this to include other errors. +func IsUsageError(err error) bool { + if err == nil { + return false + } + return err == missingResourceError +} + +type FilenameOptions struct { + Filenames []string + Recursive bool +} + +type resourceTuple struct { + Resource string + Name string +} + +type FakeClientFunc func(version schema.GroupVersion) (RESTClient, error) + +func NewFakeBuilder(fakeClientFn FakeClientFunc, restMapper RESTMapperFunc, categoryExpander CategoryExpanderFunc) *Builder { + ret := newBuilder(nil, restMapper, categoryExpander) + ret.fakeClientFn = fakeClientFn + return ret +} + +// NewBuilder creates a builder that operates on generic objects. At least one of +// internal or unstructured must be specified. 
+// TODO: Add versioned client (although versioned is still lossy) +// TODO remove internal and unstructured mapper and instead have them set the negotiated serializer for use in the client +func newBuilder(clientConfigFn ClientConfigFunc, restMapper RESTMapperFunc, categoryExpander CategoryExpanderFunc) *Builder { + return &Builder{ + clientConfigFn: clientConfigFn, + restMapperFn: restMapper, + categoryExpanderFn: categoryExpander, + requireObject: true, + } +} + +func NewBuilder(restClientGetter RESTClientGetter) *Builder { + categoryExpanderFn := func() (restmapper.CategoryExpander, error) { + discoveryClient, err := restClientGetter.ToDiscoveryClient() + if err != nil { + return nil, err + } + return restmapper.NewDiscoveryCategoryExpander(discoveryClient), err + } + + return newBuilder( + restClientGetter.ToRESTConfig, + (&cachingRESTMapperFunc{delegate: restClientGetter.ToRESTMapper}).ToRESTMapper, + (&cachingCategoryExpanderFunc{delegate: categoryExpanderFn}).ToCategoryExpander, + ) +} + +func (b *Builder) Schema(schema ContentValidator) *Builder { + b.schema = schema + return b +} + +func (b *Builder) AddError(err error) *Builder { + if err == nil { + return b + } + b.errs = append(b.errs, err) + return b +} + +// FilenameParam groups input in two categories: URLs and files (files, directories, STDIN) +// If enforceNamespace is false, namespaces in the specs will be allowed to +// override the default namespace. If it is true, namespaces that don't match +// will cause an error. +// If ContinueOnError() is set prior to this method, objects on the path that are not +// recognized will be ignored (but logged at V(2)). 
+func (b *Builder) FilenameParam(enforceNamespace bool, filenameOptions *FilenameOptions) *Builder { + recursive := filenameOptions.Recursive + paths := filenameOptions.Filenames + for _, s := range paths { + switch { + case s == "-": + b.Stdin() + case strings.Index(s, "http://") == 0 || strings.Index(s, "https://") == 0: + url, err := url.Parse(s) + if err != nil { + b.errs = append(b.errs, fmt.Errorf("the URL passed to filename %q is not valid: %v", s, err)) + continue + } + b.URL(defaultHttpGetAttempts, url) + default: + if !recursive { + b.singleItemImplied = true + } + b.Path(recursive, s) + } + } + + if enforceNamespace { + b.RequireNamespace() + } + + return b +} + +// Unstructured updates the builder so that it will request and send unstructured +// objects. Unstructured objects preserve all fields sent by the server in a map format +// based on the object's JSON structure which means no data is lost when the client +// reads and then writes an object. Use this mode in preference to Internal unless you +// are working with Go types directly. +func (b *Builder) Unstructured() *Builder { + if b.mapper != nil { + b.errs = append(b.errs, fmt.Errorf("another mapper was already selected, cannot use unstructured types")) + return b + } + b.objectTyper = unstructuredscheme.NewUnstructuredObjectTyper() + b.mapper = &mapper{ + localFn: b.isLocal, + restMapperFn: b.restMapperFn, + clientFn: b.getClient, + decoder: unstructured.UnstructuredJSONScheme, + } + + return b +} + +// WithScheme uses the scheme to manage typing, conversion (optional), and decoding. If decodingVersions +// is empty, then you can end up with internal types. You have been warned. 
+func (b *Builder) WithScheme(scheme *runtime.Scheme, decodingVersions ...schema.GroupVersion) *Builder { + if b.mapper != nil { + b.errs = append(b.errs, fmt.Errorf("another mapper was already selected, cannot use internal types")) + return b + } + b.objectTyper = scheme + codecFactory := serializer.NewCodecFactory(scheme) + negotiatedSerializer := runtime.NegotiatedSerializer(codecFactory) + // if you specified versions, you're specifying a desire for external types, which you don't want to round-trip through + // internal types + if len(decodingVersions) > 0 { + negotiatedSerializer = &serializer.DirectCodecFactory{CodecFactory: codecFactory} + } + b.negotiatedSerializer = negotiatedSerializer + + b.mapper = &mapper{ + localFn: b.isLocal, + restMapperFn: b.restMapperFn, + clientFn: b.getClient, + decoder: codecFactory.UniversalDecoder(decodingVersions...), + } + + return b +} + +// LocalParam calls Local() if local is true. +func (b *Builder) LocalParam(local bool) *Builder { + if local { + b.Local() + } + return b +} + +// Local will avoid asking the server for results. +func (b *Builder) Local() *Builder { + b.local = true + return b +} + +func (b *Builder) isLocal() bool { + return b.local +} + +// Mapper returns a copy of the current mapper. +func (b *Builder) Mapper() *mapper { + mapper := *b.mapper + return &mapper +} + +// URL accepts a number of URLs directly. +func (b *Builder) URL(httpAttemptCount int, urls ...*url.URL) *Builder { + for _, u := range urls { + b.paths = append(b.paths, &URLVisitor{ + URL: u, + StreamVisitor: NewStreamVisitor(nil, b.mapper, u.String(), b.schema), + HttpAttemptCount: httpAttemptCount, + }) + } + return b +} + +// Stdin will read objects from the standard input. If ContinueOnError() is set +// prior to this method being called, objects in the stream that are unrecognized +// will be ignored (but logged at V(2)). 
+func (b *Builder) Stdin() *Builder { + b.stream = true + b.paths = append(b.paths, FileVisitorForSTDIN(b.mapper, b.schema)) + return b +} + +// Stream will read objects from the provided reader, and if an error occurs will +// include the name string in the error message. If ContinueOnError() is set +// prior to this method being called, objects in the stream that are unrecognized +// will be ignored (but logged at V(2)). +func (b *Builder) Stream(r io.Reader, name string) *Builder { + b.stream = true + b.paths = append(b.paths, NewStreamVisitor(r, b.mapper, name, b.schema)) + return b +} + +// Path accepts a set of paths that may be files, directories (all can containing +// one or more resources). Creates a FileVisitor for each file and then each +// FileVisitor is streaming the content to a StreamVisitor. If ContinueOnError() is set +// prior to this method being called, objects on the path that are unrecognized will be +// ignored (but logged at V(2)). +func (b *Builder) Path(recursive bool, paths ...string) *Builder { + for _, p := range paths { + _, err := os.Stat(p) + if os.IsNotExist(err) { + b.errs = append(b.errs, fmt.Errorf("the path %q does not exist", p)) + continue + } + if err != nil { + b.errs = append(b.errs, fmt.Errorf("the path %q cannot be accessed: %v", p, err)) + continue + } + + visitors, err := ExpandPathsToFileVisitors(b.mapper, p, recursive, FileExtensions, b.schema) + if err != nil { + b.errs = append(b.errs, fmt.Errorf("error reading %q: %v", p, err)) + } + if len(visitors) > 1 { + b.dir = true + } + + b.paths = append(b.paths, visitors...) + } + if len(b.paths) == 0 && len(b.errs) == 0 { + b.errs = append(b.errs, fmt.Errorf("error reading %v: recognized file extensions are %v", paths, FileExtensions)) + } + return b +} + +// ResourceTypes is a list of types of resources to operate on, when listing objects on +// the server or retrieving objects that match a selector. 
+func (b *Builder) ResourceTypes(types ...string) *Builder { + b.resources = append(b.resources, types...) + return b +} + +// ResourceNames accepts a default type and one or more names, and creates tuples of +// resources +func (b *Builder) ResourceNames(resource string, names ...string) *Builder { + for _, name := range names { + // See if this input string is of type/name format + tuple, ok, err := splitResourceTypeName(name) + if err != nil { + b.errs = append(b.errs, err) + return b + } + + if ok { + b.resourceTuples = append(b.resourceTuples, tuple) + continue + } + if len(resource) == 0 { + b.errs = append(b.errs, fmt.Errorf("the argument %q must be RESOURCE/NAME", name)) + continue + } + + // Use the given default type to create a resource tuple + b.resourceTuples = append(b.resourceTuples, resourceTuple{Resource: resource, Name: name}) + } + return b +} + +// LabelSelectorParam defines a selector that should be applied to the object types to load. +// This will not affect files loaded from disk or URL. If the parameter is empty it is +// a no-op - to select all resources invoke `b.LabelSelector(labels.Everything.String)`. +func (b *Builder) LabelSelectorParam(s string) *Builder { + selector := strings.TrimSpace(s) + if len(selector) == 0 { + return b + } + if b.selectAll { + b.errs = append(b.errs, fmt.Errorf("found non-empty label selector %q with previously set 'all' parameter. ", s)) + return b + } + return b.LabelSelector(selector) +} + +// LabelSelector accepts a selector directly and will filter the resulting list by that object. +// Use LabelSelectorParam instead for user input. +func (b *Builder) LabelSelector(selector string) *Builder { + if len(selector) == 0 { + return b + } + + b.labelSelector = &selector + return b +} + +// FieldSelectorParam defines a selector that should be applied to the object types to load. +// This will not affect files loaded from disk or URL. If the parameter is empty it is +// a no-op - to select all resources. 
+func (b *Builder) FieldSelectorParam(s string) *Builder { + s = strings.TrimSpace(s) + if len(s) == 0 { + return b + } + if b.selectAll { + b.errs = append(b.errs, fmt.Errorf("found non-empty field selector %q with previously set 'all' parameter. ", s)) + return b + } + b.fieldSelector = &s + return b +} + +// ExportParam accepts the export boolean for these resources +func (b *Builder) ExportParam(export bool) *Builder { + b.export = export + return b +} + +// IncludeUninitialized accepts the include-uninitialized boolean for these resources +func (b *Builder) IncludeUninitialized(includeUninitialized bool) *Builder { + b.includeUninitialized = includeUninitialized + return b +} + +// NamespaceParam accepts the namespace that these resources should be +// considered under from - used by DefaultNamespace() and RequireNamespace() +func (b *Builder) NamespaceParam(namespace string) *Builder { + b.namespace = namespace + return b +} + +// DefaultNamespace instructs the builder to set the namespace value for any object found +// to NamespaceParam() if empty. +func (b *Builder) DefaultNamespace() *Builder { + b.defaultNamespace = true + return b +} + +// AllNamespaces instructs the builder to metav1.NamespaceAll as a namespace to request resources +// across all of the namespace. This overrides the namespace set by NamespaceParam(). +func (b *Builder) AllNamespaces(allNamespace bool) *Builder { + if allNamespace { + b.namespace = metav1.NamespaceAll + } + b.allNamespace = allNamespace + return b +} + +// RequireNamespace instructs the builder to set the namespace value for any object found +// to NamespaceParam() if empty, and if the value on the resource does not match +// NamespaceParam() an error will be returned. +func (b *Builder) RequireNamespace() *Builder { + b.requireNamespace = true + return b +} + +// RequestChunksOf attempts to load responses from the server in batches of size limit +// to avoid long delays loading and transferring very large lists. 
If unset defaults to +// no chunking. +func (b *Builder) RequestChunksOf(chunkSize int64) *Builder { + b.limitChunks = chunkSize + return b +} + +// TransformRequests alters API calls made by clients requested from this builder. Pass +// an empty list to clear modifiers. +func (b *Builder) TransformRequests(opts ...RequestTransform) *Builder { + b.requestTransforms = opts + return b +} + +// SelectEverythingParam +func (b *Builder) SelectAllParam(selectAll bool) *Builder { + if selectAll && (b.labelSelector != nil || b.fieldSelector != nil) { + b.errs = append(b.errs, fmt.Errorf("setting 'all' parameter but found a non empty selector. ")) + return b + } + b.selectAll = selectAll + return b +} + +// ResourceTypeOrNameArgs indicates that the builder should accept arguments +// of the form `([,,...]| [,,...])`. When one argument is +// received, the types provided will be retrieved from the server (and be comma delimited). +// When two or more arguments are received, they must be a single type and resource name(s). +// The allowEmptySelector permits to select all the resources (via Everything func). +func (b *Builder) ResourceTypeOrNameArgs(allowEmptySelector bool, args ...string) *Builder { + args = normalizeMultipleResourcesArgs(args) + if ok, err := hasCombinedTypeArgs(args); ok { + if err != nil { + b.errs = append(b.errs, err) + return b + } + for _, s := range args { + tuple, ok, err := splitResourceTypeName(s) + if err != nil { + b.errs = append(b.errs, err) + return b + } + if ok { + b.resourceTuples = append(b.resourceTuples, tuple) + } + } + return b + } + if len(args) > 0 { + // Try replacing aliases only in types + args[0] = b.ReplaceAliases(args[0]) + } + switch { + case len(args) > 2: + b.names = append(b.names, args[1:]...) + b.ResourceTypes(SplitResourceArgument(args[0])...) + case len(args) == 2: + b.names = append(b.names, args[1]) + b.ResourceTypes(SplitResourceArgument(args[0])...) 
+ case len(args) == 1: + b.ResourceTypes(SplitResourceArgument(args[0])...) + if b.labelSelector == nil && allowEmptySelector { + selector := labels.Everything().String() + b.labelSelector = &selector + } + case len(args) == 0: + default: + b.errs = append(b.errs, fmt.Errorf("arguments must consist of a resource or a resource and name")) + } + return b +} + +// ReplaceAliases accepts an argument and tries to expand any existing +// aliases found in it +func (b *Builder) ReplaceAliases(input string) string { + replaced := []string{} + for _, arg := range strings.Split(input, ",") { + if b.categoryExpanderFn == nil { + continue + } + categoryExpander, err := b.categoryExpanderFn() + if err != nil { + b.AddError(err) + continue + } + + if resources, ok := categoryExpander.Expand(arg); ok { + asStrings := []string{} + for _, resource := range resources { + if len(resource.Group) == 0 { + asStrings = append(asStrings, resource.Resource) + continue + } + asStrings = append(asStrings, resource.Resource+"."+resource.Group) + } + arg = strings.Join(asStrings, ",") + } + replaced = append(replaced, arg) + } + return strings.Join(replaced, ",") +} + +func hasCombinedTypeArgs(args []string) (bool, error) { + hasSlash := 0 + for _, s := range args { + if strings.Contains(s, "/") { + hasSlash++ + } + } + switch { + case hasSlash > 0 && hasSlash == len(args): + return true, nil + case hasSlash > 0 && hasSlash != len(args): + baseCmd := "cmd" + if len(os.Args) > 0 { + baseCmdSlice := strings.Split(os.Args[0], "/") + baseCmd = baseCmdSlice[len(baseCmdSlice)-1] + } + return true, fmt.Errorf("there is no need to specify a resource type as a separate argument when passing arguments in resource/name form (e.g. 
'%s get resource/' instead of '%s get resource resource/'", baseCmd, baseCmd) + default: + return false, nil + } +} + +// Normalize args convert multiple resources to resource tuples, a,b,c d +// as a transform to a/d b/d c/d +func normalizeMultipleResourcesArgs(args []string) []string { + if len(args) >= 2 { + resources := []string{} + resources = append(resources, SplitResourceArgument(args[0])...) + if len(resources) > 1 { + names := []string{} + names = append(names, args[1:]...) + newArgs := []string{} + for _, resource := range resources { + for _, name := range names { + newArgs = append(newArgs, strings.Join([]string{resource, name}, "/")) + } + } + return newArgs + } + } + return args +} + +// splitResourceTypeName handles type/name resource formats and returns a resource tuple +// (empty or not), whether it successfully found one, and an error +func splitResourceTypeName(s string) (resourceTuple, bool, error) { + if !strings.Contains(s, "/") { + return resourceTuple{}, false, nil + } + seg := strings.Split(s, "/") + if len(seg) != 2 { + return resourceTuple{}, false, fmt.Errorf("arguments in resource/name form may not have more than one slash") + } + resource, name := seg[0], seg[1] + if len(resource) == 0 || len(name) == 0 || len(SplitResourceArgument(resource)) != 1 { + return resourceTuple{}, false, fmt.Errorf("arguments in resource/name form must have a single resource and name") + } + return resourceTuple{Resource: resource, Name: name}, true, nil +} + +// Flatten will convert any objects with a field named "Items" that is an array of runtime.Object +// compatible types into individual entries and give them their own items. The original object +// is not passed to any visitors. +func (b *Builder) Flatten() *Builder { + b.flatten = true + return b +} + +// Latest will fetch the latest copy of any objects loaded from URLs or files from the server. 
+func (b *Builder) Latest() *Builder { + b.latest = true + return b +} + +// RequireObject ensures that resulting infos have an object set. If false, resulting info may not have an object set. +func (b *Builder) RequireObject(require bool) *Builder { + b.requireObject = require + return b +} + +// ContinueOnError will attempt to load and visit as many objects as possible, even if some visits +// return errors or some objects cannot be loaded. The default behavior is to terminate after +// the first error is returned from a VisitorFunc. +func (b *Builder) ContinueOnError() *Builder { + b.continueOnError = true + return b +} + +// SingleResourceType will cause the builder to error if the user specifies more than a single type +// of resource. +func (b *Builder) SingleResourceType() *Builder { + b.singleResourceType = true + return b +} + +// mappingFor returns the RESTMapping for the Kind given, or the Kind referenced by the resource. +// Prefers a fully specified GroupVersionResource match. If one is not found, we match on a fully +// specified GroupVersionKind, or fallback to a match on GroupKind. 
+func (b *Builder) mappingFor(resourceOrKindArg string) (*meta.RESTMapping, error) { + fullySpecifiedGVR, groupResource := schema.ParseResourceArg(resourceOrKindArg) + gvk := schema.GroupVersionKind{} + restMapper, err := b.restMapperFn() + if err != nil { + return nil, err + } + + if fullySpecifiedGVR != nil { + gvk, _ = restMapper.KindFor(*fullySpecifiedGVR) + } + if gvk.Empty() { + gvk, _ = restMapper.KindFor(groupResource.WithVersion("")) + } + if !gvk.Empty() { + return restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + } + + fullySpecifiedGVK, groupKind := schema.ParseKindArg(resourceOrKindArg) + if fullySpecifiedGVK == nil { + gvk := groupKind.WithVersion("") + fullySpecifiedGVK = &gvk + } + + if !fullySpecifiedGVK.Empty() { + if mapping, err := restMapper.RESTMapping(fullySpecifiedGVK.GroupKind(), fullySpecifiedGVK.Version); err == nil { + return mapping, nil + } + } + + mapping, err := restMapper.RESTMapping(groupKind, gvk.Version) + if err != nil { + // if we error out here, it is because we could not match a resource or a kind + // for the given argument. To maintain consistency with previous behavior, + // announce that a resource type could not be found. 
+ // if the error is a URL error, then we had trouble doing discovery, so we should return the original + // error since it may help a user diagnose what is actually wrong + if _, ok := err.(*url.Error); ok { + return nil, err + } + return nil, fmt.Errorf("the server doesn't have a resource type %q", groupResource.Resource) + } + + return mapping, nil +} + +func (b *Builder) resourceMappings() ([]*meta.RESTMapping, error) { + if len(b.resources) > 1 && b.singleResourceType { + return nil, fmt.Errorf("you may only specify a single resource type") + } + mappings := []*meta.RESTMapping{} + seen := map[schema.GroupVersionKind]bool{} + for _, r := range b.resources { + mapping, err := b.mappingFor(r) + if err != nil { + return nil, err + } + // This ensures the mappings for resources(shortcuts, plural) unique + if seen[mapping.GroupVersionKind] { + continue + } + seen[mapping.GroupVersionKind] = true + + mappings = append(mappings, mapping) + } + return mappings, nil +} + +func (b *Builder) resourceTupleMappings() (map[string]*meta.RESTMapping, error) { + mappings := make(map[string]*meta.RESTMapping) + canonical := make(map[schema.GroupVersionResource]struct{}) + for _, r := range b.resourceTuples { + if _, ok := mappings[r.Resource]; ok { + continue + } + mapping, err := b.mappingFor(r.Resource) + if err != nil { + return nil, err + } + + mappings[r.Resource] = mapping + canonical[mapping.Resource] = struct{}{} + } + if len(canonical) > 1 && b.singleResourceType { + return nil, fmt.Errorf("you may only specify a single resource type") + } + return mappings, nil +} + +func (b *Builder) visitorResult() *Result { + if len(b.errs) > 0 { + return &Result{err: utilerrors.NewAggregate(b.errs)} + } + + if b.selectAll { + selector := labels.Everything().String() + b.labelSelector = &selector + } + + // visit items specified by paths + if len(b.paths) != 0 { + return b.visitByPaths() + } + + // visit selectors + if b.labelSelector != nil || b.fieldSelector != nil { + return 
b.visitBySelector() + } + + // visit items specified by resource and name + if len(b.resourceTuples) != 0 { + return b.visitByResource() + } + + // visit items specified by name + if len(b.names) != 0 { + return b.visitByName() + } + + if len(b.resources) != 0 { + return &Result{err: fmt.Errorf("resource(s) were provided, but no name, label selector, or --all flag specified")} + } + return &Result{err: missingResourceError} +} + +func (b *Builder) visitBySelector() *Result { + result := &Result{ + targetsSingleItems: false, + } + + if len(b.names) != 0 { + return result.withError(fmt.Errorf("name cannot be provided when a selector is specified")) + } + if len(b.resourceTuples) != 0 { + return result.withError(fmt.Errorf("selectors and the all flag cannot be used when passing resource/name arguments")) + } + if len(b.resources) == 0 { + return result.withError(fmt.Errorf("at least one resource must be specified to use a selector")) + } + mappings, err := b.resourceMappings() + if err != nil { + result.err = err + return result + } + + var labelSelector, fieldSelector string + if b.labelSelector != nil { + labelSelector = *b.labelSelector + } + if b.fieldSelector != nil { + fieldSelector = *b.fieldSelector + } + + visitors := []Visitor{} + for _, mapping := range mappings { + client, err := b.getClient(mapping.GroupVersionKind.GroupVersion()) + if err != nil { + result.err = err + return result + } + selectorNamespace := b.namespace + if mapping.Scope.Name() != meta.RESTScopeNameNamespace { + selectorNamespace = "" + } + visitors = append(visitors, NewSelector(client, mapping, selectorNamespace, labelSelector, fieldSelector, b.export, b.includeUninitialized, b.limitChunks)) + } + if b.continueOnError { + result.visitor = EagerVisitorList(visitors) + } else { + result.visitor = VisitorList(visitors) + } + result.sources = visitors + return result +} + +func (b *Builder) getClient(gv schema.GroupVersion) (RESTClient, error) { + var ( + client RESTClient + err error + ) 
+ + switch { + case b.fakeClientFn != nil: + client, err = b.fakeClientFn(gv) + case b.negotiatedSerializer != nil: + client, err = b.clientConfigFn.clientForGroupVersion(gv, b.negotiatedSerializer) + default: + client, err = b.clientConfigFn.unstructuredClientForGroupVersion(gv) + } + + if err != nil { + return nil, err + } + + return NewClientWithOptions(client, b.requestTransforms...), nil +} + +func (b *Builder) visitByResource() *Result { + // if b.singleItemImplied is false, this could be by default, so double-check length + // of resourceTuples to determine if in fact it is singleItemImplied or not + isSingleItemImplied := b.singleItemImplied + if !isSingleItemImplied { + isSingleItemImplied = len(b.resourceTuples) == 1 + } + + result := &Result{ + singleItemImplied: isSingleItemImplied, + targetsSingleItems: true, + } + + if len(b.resources) != 0 { + return result.withError(fmt.Errorf("you may not specify individual resources and bulk resources in the same call")) + } + + // retrieve one client for each resource + mappings, err := b.resourceTupleMappings() + if err != nil { + result.err = err + return result + } + clients := make(map[string]RESTClient) + for _, mapping := range mappings { + s := fmt.Sprintf("%s/%s", mapping.GroupVersionKind.GroupVersion().String(), mapping.Resource.Resource) + if _, ok := clients[s]; ok { + continue + } + client, err := b.getClient(mapping.GroupVersionKind.GroupVersion()) + if err != nil { + result.err = err + return result + } + clients[s] = client + } + + items := []Visitor{} + for _, tuple := range b.resourceTuples { + mapping, ok := mappings[tuple.Resource] + if !ok { + return result.withError(fmt.Errorf("resource %q is not recognized: %v", tuple.Resource, mappings)) + } + s := fmt.Sprintf("%s/%s", mapping.GroupVersionKind.GroupVersion().String(), mapping.Resource.Resource) + client, ok := clients[s] + if !ok { + return result.withError(fmt.Errorf("could not find a client for resource %q", tuple.Resource)) + } + + 
selectorNamespace := b.namespace + if mapping.Scope.Name() != meta.RESTScopeNameNamespace { + selectorNamespace = "" + } else { + if len(b.namespace) == 0 { + errMsg := "namespace may not be empty when retrieving a resource by name" + if b.allNamespace { + errMsg = "a resource cannot be retrieved by name across all namespaces" + } + return result.withError(fmt.Errorf(errMsg)) + } + } + + info := &Info{ + Client: client, + Mapping: mapping, + Namespace: selectorNamespace, + Name: tuple.Name, + Export: b.export, + } + items = append(items, info) + } + + var visitors Visitor + if b.continueOnError { + visitors = EagerVisitorList(items) + } else { + visitors = VisitorList(items) + } + result.visitor = visitors + result.sources = items + return result +} + +func (b *Builder) visitByName() *Result { + result := &Result{ + singleItemImplied: len(b.names) == 1, + targetsSingleItems: true, + } + + if len(b.paths) != 0 { + return result.withError(fmt.Errorf("when paths, URLs, or stdin is provided as input, you may not specify a resource by arguments as well")) + } + if len(b.resources) == 0 { + return result.withError(fmt.Errorf("you must provide a resource and a resource name together")) + } + if len(b.resources) > 1 { + return result.withError(fmt.Errorf("you must specify only one resource")) + } + + mappings, err := b.resourceMappings() + if err != nil { + result.err = err + return result + } + mapping := mappings[0] + + client, err := b.getClient(mapping.GroupVersionKind.GroupVersion()) + if err != nil { + result.err = err + return result + } + + selectorNamespace := b.namespace + if mapping.Scope.Name() != meta.RESTScopeNameNamespace { + selectorNamespace = "" + } else { + if len(b.namespace) == 0 { + errMsg := "namespace may not be empty when retrieving a resource by name" + if b.allNamespace { + errMsg = "a resource cannot be retrieved by name across all namespaces" + } + return result.withError(fmt.Errorf(errMsg)) + } + } + + visitors := []Visitor{} + for _, name := 
range b.names { + info := &Info{ + Client: client, + Mapping: mapping, + Namespace: selectorNamespace, + Name: name, + Export: b.export, + } + visitors = append(visitors, info) + } + result.visitor = VisitorList(visitors) + result.sources = visitors + return result +} + +func (b *Builder) visitByPaths() *Result { + result := &Result{ + singleItemImplied: !b.dir && !b.stream && len(b.paths) == 1, + targetsSingleItems: true, + } + + if len(b.resources) != 0 { + return result.withError(fmt.Errorf("when paths, URLs, or stdin is provided as input, you may not specify resource arguments as well")) + } + if len(b.names) != 0 { + return result.withError(fmt.Errorf("name cannot be provided when a path is specified")) + } + if len(b.resourceTuples) != 0 { + return result.withError(fmt.Errorf("resource/name arguments cannot be provided when a path is specified")) + } + + var visitors Visitor + if b.continueOnError { + visitors = EagerVisitorList(b.paths) + } else { + visitors = VisitorList(b.paths) + } + + if b.flatten { + visitors = NewFlattenListVisitor(visitors, b.objectTyper, b.mapper) + } + + // only items from disk can be refetched + if b.latest { + // must set namespace prior to fetching + if b.defaultNamespace { + visitors = NewDecoratedVisitor(visitors, SetNamespace(b.namespace)) + } + visitors = NewDecoratedVisitor(visitors, RetrieveLatest) + } + if b.labelSelector != nil { + selector, err := labels.Parse(*b.labelSelector) + if err != nil { + return result.withError(fmt.Errorf("the provided selector %q is not valid: %v", *b.labelSelector, err)) + } + visitors = NewFilteredVisitor(visitors, FilterByLabelSelector(selector)) + } + result.visitor = visitors + result.sources = b.paths + return result +} + +// Do returns a Result object with a Visitor for the resources identified by the Builder. +// The visitor will respect the error behavior specified by ContinueOnError. 
Note that stream +// inputs are consumed by the first execution - use Infos() or Object() on the Result to capture a list +// for further iteration. +func (b *Builder) Do() *Result { + r := b.visitorResult() + r.mapper = b.Mapper() + if r.err != nil { + return r + } + if b.flatten { + r.visitor = NewFlattenListVisitor(r.visitor, b.objectTyper, b.mapper) + } + helpers := []VisitorFunc{} + if b.defaultNamespace { + helpers = append(helpers, SetNamespace(b.namespace)) + } + if b.requireNamespace { + helpers = append(helpers, RequireNamespace(b.namespace)) + } + helpers = append(helpers, FilterNamespace) + if b.requireObject { + helpers = append(helpers, RetrieveLazy) + } + if b.continueOnError { + r.visitor = NewDecoratedVisitor(ContinueOnErrorVisitor{r.visitor}, helpers...) + } else { + r.visitor = NewDecoratedVisitor(r.visitor, helpers...) + } + return r +} + +// SplitResourceArgument splits the argument with commas and returns unique +// strings in the original order. +func SplitResourceArgument(arg string) []string { + out := []string{} + set := sets.NewString() + for _, s := range strings.Split(arg, ",") { + if set.Has(s) { + continue + } + set.Insert(s) + out = append(out, s) + } + return out +} + +// HasNames returns true if the provided args contain resource names +func HasNames(args []string) (bool, error) { + args = normalizeMultipleResourcesArgs(args) + hasCombinedTypes, err := hasCombinedTypeArgs(args) + if err != nil { + return false, err + } + return hasCombinedTypes || len(args) > 1, nil +} + +type cachingRESTMapperFunc struct { + delegate RESTMapperFunc + + lock sync.Mutex + cached meta.RESTMapper +} + +func (c *cachingRESTMapperFunc) ToRESTMapper() (meta.RESTMapper, error) { + c.lock.Lock() + defer c.lock.Unlock() + if c.cached != nil { + return c.cached, nil + } + + ret, err := c.delegate() + if err != nil { + return nil, err + } + c.cached = ret + return c.cached, nil +} + +type cachingCategoryExpanderFunc struct { + delegate CategoryExpanderFunc + 
+ lock sync.Mutex + cached restmapper.CategoryExpander +} + +func (c *cachingCategoryExpanderFunc) ToCategoryExpander() (restmapper.CategoryExpander, error) { + c.lock.Lock() + defer c.lock.Unlock() + if c.cached != nil { + return c.cached, nil + } + + ret, err := c.delegate() + if err != nil { + return nil, err + } + c.cached = ret + return c.cached, nil +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/client.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/client.go new file mode 100644 index 0000000000..46380207f3 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/client.go @@ -0,0 +1,58 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/rest" +) + +// TODO require negotiatedSerializer. 
leaving it optional lets us plumb current behavior and deal with the difference after major plumbing is complete +func (clientConfigFn ClientConfigFunc) clientForGroupVersion(gv schema.GroupVersion, negotiatedSerializer runtime.NegotiatedSerializer) (RESTClient, error) { + cfg, err := clientConfigFn() + if err != nil { + return nil, err + } + if negotiatedSerializer != nil { + cfg.ContentConfig.NegotiatedSerializer = negotiatedSerializer + } + cfg.GroupVersion = &gv + if len(gv.Group) == 0 { + cfg.APIPath = "/api" + } else { + cfg.APIPath = "/apis" + } + + return rest.RESTClientFor(cfg) +} + +func (clientConfigFn ClientConfigFunc) unstructuredClientForGroupVersion(gv schema.GroupVersion) (RESTClient, error) { + cfg, err := clientConfigFn() + if err != nil { + return nil, err + } + cfg.ContentConfig = UnstructuredPlusDefaultContentConfig() + cfg.GroupVersion = &gv + if len(gv.Group) == 0 { + cfg.APIPath = "/api" + } else { + cfg.APIPath = "/apis" + } + + return rest.RESTClientFor(cfg) +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/doc.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/doc.go new file mode 100644 index 0000000000..a0e22e7cf7 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package resource assists clients in dealing with RESTful objects that match the +// Kubernetes API conventions. 
The Helper object provides simple CRUD operations +// on resources. The Visitor interface makes it easy to deal with multiple resources +// in bulk for retrieval and operation. The Builder object simplifies converting +// standard command line arguments and parameters into a Visitor that can iterate +// over all of the identified resources, whether on the server or on the local +// filesystem. +package resource diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/fake.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/fake.go new file mode 100644 index 0000000000..276c343e21 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/fake.go @@ -0,0 +1,40 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resource + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/restmapper" +) + +// FakeCategoryExpander is for testing only +var FakeCategoryExpander restmapper.CategoryExpander = restmapper.SimpleCategoryExpander{ + Expansions: map[string][]schema.GroupResource{ + "all": { + {Group: "", Resource: "pods"}, + {Group: "", Resource: "replicationcontrollers"}, + {Group: "", Resource: "services"}, + {Group: "apps", Resource: "statefulsets"}, + {Group: "autoscaling", Resource: "horizontalpodautoscalers"}, + {Group: "batch", Resource: "jobs"}, + {Group: "batch", Resource: "cronjobs"}, + {Group: "extensions", Resource: "daemonsets"}, + {Group: "extensions", Resource: "deployments"}, + {Group: "extensions", Resource: "replicasets"}, + }, + }, +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/helper.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/helper.go new file mode 100644 index 0000000000..059d518af2 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/helper.go @@ -0,0 +1,185 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resource + +import ( + "strconv" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" +) + +var metadataAccessor = meta.NewAccessor() + +// Helper provides methods for retrieving or mutating a RESTful +// resource. +type Helper struct { + // The name of this resource as the server would recognize it + Resource string + // A RESTClient capable of mutating this resource. + RESTClient RESTClient + // True if the resource type is scoped to namespaces + NamespaceScoped bool +} + +// NewHelper creates a Helper from a ResourceMapping +func NewHelper(client RESTClient, mapping *meta.RESTMapping) *Helper { + return &Helper{ + Resource: mapping.Resource.Resource, + RESTClient: client, + NamespaceScoped: mapping.Scope.Name() == meta.RESTScopeNameNamespace, + } +} + +func (m *Helper) Get(namespace, name string, export bool) (runtime.Object, error) { + req := m.RESTClient.Get(). + NamespaceIfScoped(namespace, m.NamespaceScoped). + Resource(m.Resource). + Name(name) + if export { + // TODO: I should be part of GetOptions + req.Param("export", strconv.FormatBool(export)) + } + return req.Do().Get() +} + +func (m *Helper) List(namespace, apiVersion string, export bool, options *metav1.ListOptions) (runtime.Object, error) { + req := m.RESTClient.Get(). + NamespaceIfScoped(namespace, m.NamespaceScoped). + Resource(m.Resource). + VersionedParams(options, metav1.ParameterCodec) + if export { + // TODO: I should be part of ListOptions + req.Param("export", strconv.FormatBool(export)) + } + return req.Do().Get() +} + +func (m *Helper) Watch(namespace, apiVersion string, options *metav1.ListOptions) (watch.Interface, error) { + options.Watch = true + return m.RESTClient.Get(). + NamespaceIfScoped(namespace, m.NamespaceScoped). + Resource(m.Resource). + VersionedParams(options, metav1.ParameterCodec). 
+ Watch() +} + +func (m *Helper) WatchSingle(namespace, name, resourceVersion string) (watch.Interface, error) { + return m.RESTClient.Get(). + NamespaceIfScoped(namespace, m.NamespaceScoped). + Resource(m.Resource). + VersionedParams(&metav1.ListOptions{ + ResourceVersion: resourceVersion, + Watch: true, + FieldSelector: fields.OneTermEqualSelector("metadata.name", name).String(), + }, metav1.ParameterCodec). + Watch() +} + +func (m *Helper) Delete(namespace, name string) (runtime.Object, error) { + return m.DeleteWithOptions(namespace, name, nil) +} + +func (m *Helper) DeleteWithOptions(namespace, name string, options *metav1.DeleteOptions) (runtime.Object, error) { + return m.RESTClient.Delete(). + NamespaceIfScoped(namespace, m.NamespaceScoped). + Resource(m.Resource). + Name(name). + Body(options). + Do(). + Get() +} + +func (m *Helper) Create(namespace string, modify bool, obj runtime.Object, options *metav1.CreateOptions) (runtime.Object, error) { + if options == nil { + options = &metav1.CreateOptions{} + } + if modify { + // Attempt to version the object based on client logic. + version, err := metadataAccessor.ResourceVersion(obj) + if err != nil { + // We don't know how to clear the version on this object, so send it to the server as is + return m.createResource(m.RESTClient, m.Resource, namespace, obj, options) + } + if version != "" { + if err := metadataAccessor.SetResourceVersion(obj, ""); err != nil { + return nil, err + } + } + } + + return m.createResource(m.RESTClient, m.Resource, namespace, obj, options) +} + +func (m *Helper) createResource(c RESTClient, resource, namespace string, obj runtime.Object, options *metav1.CreateOptions) (runtime.Object, error) { + return c.Post(). + NamespaceIfScoped(namespace, m.NamespaceScoped). + Resource(resource). + VersionedParams(options, metav1.ParameterCodec). + Body(obj). + Do(). 
+ Get() +} +func (m *Helper) Patch(namespace, name string, pt types.PatchType, data []byte, options *metav1.UpdateOptions) (runtime.Object, error) { + if options == nil { + options = &metav1.UpdateOptions{} + } + return m.RESTClient.Patch(pt). + NamespaceIfScoped(namespace, m.NamespaceScoped). + Resource(m.Resource). + Name(name). + VersionedParams(options, metav1.ParameterCodec). + Body(data). + Do(). + Get() +} + +func (m *Helper) Replace(namespace, name string, overwrite bool, obj runtime.Object) (runtime.Object, error) { + c := m.RESTClient + + // Attempt to version the object based on client logic. + version, err := metadataAccessor.ResourceVersion(obj) + if err != nil { + // We don't know how to version this object, so send it to the server as is + return m.replaceResource(c, m.Resource, namespace, name, obj) + } + if version == "" && overwrite { + // Retrieve the current version of the object to overwrite the server object + serverObj, err := c.Get().NamespaceIfScoped(namespace, m.NamespaceScoped).Resource(m.Resource).Name(name).Do().Get() + if err != nil { + // The object does not exist, but we want it to be created + return m.replaceResource(c, m.Resource, namespace, name, obj) + } + serverVersion, err := metadataAccessor.ResourceVersion(serverObj) + if err != nil { + return nil, err + } + if err := metadataAccessor.SetResourceVersion(obj, serverVersion); err != nil { + return nil, err + } + } + + return m.replaceResource(c, m.Resource, namespace, name, obj) +} + +func (m *Helper) replaceResource(c RESTClient, resource, namespace, name string, obj runtime.Object) (runtime.Object, error) { + return c.Put().NamespaceIfScoped(namespace, m.NamespaceScoped).Resource(resource).Name(name).Body(obj).Do().Get() +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/interfaces.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/interfaces.go new file mode 100644 index 0000000000..29d7b34ab6 --- /dev/null +++ 
b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/interfaces.go @@ -0,0 +1,103 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" +) + +type RESTClientGetter interface { + ToRESTConfig() (*rest.Config, error) + ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) + ToRESTMapper() (meta.RESTMapper, error) +} + +type ClientConfigFunc func() (*rest.Config, error) +type RESTMapperFunc func() (meta.RESTMapper, error) +type CategoryExpanderFunc func() (restmapper.CategoryExpander, error) + +// RESTClient is a client helper for dealing with RESTful resources +// in a generic way. +type RESTClient interface { + Get() *rest.Request + Post() *rest.Request + Patch(types.PatchType) *rest.Request + Delete() *rest.Request + Put() *rest.Request +} + +// RequestTransform is a function that is given a chance to modify the outgoing request. +type RequestTransform func(*rest.Request) + +// NewClientWithOptions wraps the provided RESTClient and invokes each transform on each +// newly created request. 
+func NewClientWithOptions(c RESTClient, transforms ...RequestTransform) RESTClient { + if len(transforms) == 0 { + return c + } + return &clientOptions{c: c, transforms: transforms} +} + +type clientOptions struct { + c RESTClient + transforms []RequestTransform +} + +func (c *clientOptions) modify(req *rest.Request) *rest.Request { + for _, transform := range c.transforms { + transform(req) + } + return req +} + +func (c *clientOptions) Get() *rest.Request { + return c.modify(c.c.Get()) +} + +func (c *clientOptions) Post() *rest.Request { + return c.modify(c.c.Post()) +} +func (c *clientOptions) Patch(t types.PatchType) *rest.Request { + return c.modify(c.c.Patch(t)) +} +func (c *clientOptions) Delete() *rest.Request { + return c.modify(c.c.Delete()) +} +func (c *clientOptions) Put() *rest.Request { + return c.modify(c.c.Put()) +} + +// ContentValidator is an interface that knows how to validate an API object serialized to a byte array. +type ContentValidator interface { + ValidateBytes(data []byte) error +} + +// Visitor lets clients walk a list of resources. +type Visitor interface { + Visit(VisitorFunc) error +} + +// VisitorFunc implements the Visitor interface for a matching function. +// If there was a problem walking a list of resources, the incoming error +// will describe the problem and the function can decide how to handle that error. +// A nil returned indicates to accept an error to continue loops even when errors happen. +// This is useful for ignoring certain kinds of errors or aggregating errors in some way. +type VisitorFunc func(*Info, error) error diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/mapper.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/mapper.go new file mode 100644 index 0000000000..962f37711f --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/mapper.go @@ -0,0 +1,161 @@ +/* +Copyright 2014 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// Mapper is a convenience struct for holding references to the interfaces +// needed to create Info for arbitrary objects. +type mapper struct { + // localFn indicates the call can't make server requests + localFn func() bool + + restMapperFn RESTMapperFunc + clientFn func(version schema.GroupVersion) (RESTClient, error) + decoder runtime.Decoder +} + +// InfoForData creates an Info object for the given data. An error is returned +// if any of the decoding or client lookup steps fail. Name and namespace will be +// set into Info if the mapping's MetadataAccessor can retrieve them. 
+func (m *mapper) infoForData(data []byte, source string) (*Info, error) { + obj, gvk, err := m.decoder.Decode(data, nil, nil) + if err != nil { + return nil, fmt.Errorf("unable to decode %q: %v", source, err) + } + + name, _ := metadataAccessor.Name(obj) + namespace, _ := metadataAccessor.Namespace(obj) + resourceVersion, _ := metadataAccessor.ResourceVersion(obj) + + ret := &Info{ + Source: source, + Namespace: namespace, + Name: name, + ResourceVersion: resourceVersion, + + Object: obj, + } + + if m.localFn == nil || !m.localFn() { + restMapper, err := m.restMapperFn() + if err != nil { + return nil, err + } + mapping, err := restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, fmt.Errorf("unable to recognize %q: %v", source, err) + } + ret.Mapping = mapping + + client, err := m.clientFn(gvk.GroupVersion()) + if err != nil { + return nil, fmt.Errorf("unable to connect to a server to handle %q: %v", mapping.Resource, err) + } + ret.Client = client + } + + return ret, nil +} + +// InfoForObject creates an Info object for the given Object. An error is returned +// if the object cannot be introspected. Name and namespace will be set into Info +// if the mapping's MetadataAccessor can retrieve them. 
+func (m *mapper) infoForObject(obj runtime.Object, typer runtime.ObjectTyper, preferredGVKs []schema.GroupVersionKind) (*Info, error) { + groupVersionKinds, _, err := typer.ObjectKinds(obj) + if err != nil { + return nil, fmt.Errorf("unable to get type info from the object %q: %v", reflect.TypeOf(obj), err) + } + + gvk := groupVersionKinds[0] + if len(groupVersionKinds) > 1 && len(preferredGVKs) > 0 { + gvk = preferredObjectKind(groupVersionKinds, preferredGVKs) + } + + name, _ := metadataAccessor.Name(obj) + namespace, _ := metadataAccessor.Namespace(obj) + resourceVersion, _ := metadataAccessor.ResourceVersion(obj) + ret := &Info{ + Namespace: namespace, + Name: name, + ResourceVersion: resourceVersion, + + Object: obj, + } + + if m.localFn == nil || !m.localFn() { + restMapper, err := m.restMapperFn() + if err != nil { + return nil, err + } + mapping, err := restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, fmt.Errorf("unable to recognize %v", err) + } + ret.Mapping = mapping + + client, err := m.clientFn(gvk.GroupVersion()) + if err != nil { + return nil, fmt.Errorf("unable to connect to a server to handle %q: %v", mapping.Resource, err) + } + ret.Client = client + } + + return ret, nil +} + +// preferredObjectKind picks the possibility that most closely matches the priority list in this order: +// GroupVersionKind matches (exact match) +// GroupKind matches +// Group matches +func preferredObjectKind(possibilities []schema.GroupVersionKind, preferences []schema.GroupVersionKind) schema.GroupVersionKind { + // Exact match + for _, priority := range preferences { + for _, possibility := range possibilities { + if possibility == priority { + return possibility + } + } + } + + // GroupKind match + for _, priority := range preferences { + for _, possibility := range possibilities { + if possibility.GroupKind() == priority.GroupKind() { + return possibility + } + } + } + + // Group match + for _, priority := range preferences { + 
for _, possibility := range possibilities { + if possibility.Group == priority.Group { + return possibility + } + } + } + + // Just pick the first + return possibilities[0] +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/result.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/result.go new file mode 100644 index 0000000000..b8722afe69 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/result.go @@ -0,0 +1,242 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "fmt" + "reflect" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/watch" +) + +// ErrMatchFunc can be used to filter errors that may not be true failures. +type ErrMatchFunc func(error) bool + +// Result contains helper methods for dealing with the outcome of a Builder. +type Result struct { + err error + visitor Visitor + + sources []Visitor + singleItemImplied bool + targetsSingleItems bool + + mapper *mapper + ignoreErrors []utilerrors.Matcher + + // populated by a call to Infos + info []*Info +} + +// withError allows a fluent style for internal result code. 
+func (r *Result) withError(err error) *Result { + r.err = err + return r +} + +// TargetsSingleItems returns true if any of the builder arguments pointed +// to non-list calls (if the user explicitly asked for any object by name). +// This includes directories, streams, URLs, and resource name tuples. +func (r *Result) TargetsSingleItems() bool { + return r.targetsSingleItems +} + +// IgnoreErrors will filter errors that occur when by visiting the result +// (but not errors that occur by creating the result in the first place), +// eliminating any that match fns. This is best used in combination with +// Builder.ContinueOnError(), where the visitors accumulate errors and return +// them after visiting as a slice of errors. If no errors remain after +// filtering, the various visitor methods on Result will return nil for +// err. +func (r *Result) IgnoreErrors(fns ...ErrMatchFunc) *Result { + for _, fn := range fns { + r.ignoreErrors = append(r.ignoreErrors, utilerrors.Matcher(fn)) + } + return r +} + +// Mapper returns a copy of the builder's mapper. +func (r *Result) Mapper() *mapper { + return r.mapper +} + +// Err returns one or more errors (via a util.ErrorList) that occurred prior +// to visiting the elements in the visitor. To see all errors including those +// that occur during visitation, invoke Infos(). +func (r *Result) Err() error { + return r.err +} + +// Visit implements the Visitor interface on the items described in the Builder. +// Note that some visitor sources are not traversable more than once, or may +// return different results. If you wish to operate on the same set of resources +// multiple times, use the Infos() method. +func (r *Result) Visit(fn VisitorFunc) error { + if r.err != nil { + return r.err + } + err := r.visitor.Visit(fn) + return utilerrors.FilterOut(err, r.ignoreErrors...) +} + +// IntoSingleItemImplied sets the provided boolean pointer to true if the Builder input +// implies a single item, or multiple. 
+func (r *Result) IntoSingleItemImplied(b *bool) *Result { + *b = r.singleItemImplied + return r +} + +// Infos returns an array of all of the resource infos retrieved via traversal. +// Will attempt to traverse the entire set of visitors only once, and will return +// a cached list on subsequent calls. +func (r *Result) Infos() ([]*Info, error) { + if r.err != nil { + return nil, r.err + } + if r.info != nil { + return r.info, nil + } + + infos := []*Info{} + err := r.visitor.Visit(func(info *Info, err error) error { + if err != nil { + return err + } + infos = append(infos, info) + return nil + }) + err = utilerrors.FilterOut(err, r.ignoreErrors...) + + r.info, r.err = infos, err + return infos, err +} + +// Object returns a single object representing the output of a single visit to all +// found resources. If the Builder was a singular context (expected to return a +// single resource by user input) and only a single resource was found, the resource +// will be returned as is. Otherwise, the returned resources will be part of an +// v1.List. The ResourceVersion of the v1.List will be set only if it is identical +// across all infos returned. 
+func (r *Result) Object() (runtime.Object, error) { + infos, err := r.Infos() + if err != nil { + return nil, err + } + + versions := sets.String{} + objects := []runtime.Object{} + for _, info := range infos { + if info.Object != nil { + objects = append(objects, info.Object) + versions.Insert(info.ResourceVersion) + } + } + + if len(objects) == 1 { + if r.singleItemImplied { + return objects[0], nil + } + // if the item is a list already, don't create another list + if meta.IsListType(objects[0]) { + return objects[0], nil + } + } + + version := "" + if len(versions) == 1 { + version = versions.List()[0] + } + + return toV1List(objects, version), err +} + +// Compile time check to enforce that list implements the necessary interface +var _ metav1.ListInterface = &v1.List{} +var _ metav1.ListMetaAccessor = &v1.List{} + +// toV1List takes a slice of Objects + their version, and returns +// a v1.List Object containing the objects in the Items field +func toV1List(objects []runtime.Object, version string) runtime.Object { + raw := []runtime.RawExtension{} + for _, o := range objects { + raw = append(raw, runtime.RawExtension{Object: o}) + } + return &v1.List{ + ListMeta: metav1.ListMeta{ + ResourceVersion: version, + }, + Items: raw, + } +} + +// ResourceMapping returns a single meta.RESTMapping representing the +// resources located by the builder, or an error if more than one +// mapping was found. 
+func (r *Result) ResourceMapping() (*meta.RESTMapping, error) { + if r.err != nil { + return nil, r.err + } + mappings := map[schema.GroupVersionResource]*meta.RESTMapping{} + for i := range r.sources { + m, ok := r.sources[i].(ResourceMapping) + if !ok { + return nil, fmt.Errorf("a resource mapping could not be loaded from %v", reflect.TypeOf(r.sources[i])) + } + mapping := m.ResourceMapping() + mappings[mapping.Resource] = mapping + } + if len(mappings) != 1 { + return nil, fmt.Errorf("expected only a single resource type") + } + for _, mapping := range mappings { + return mapping, nil + } + return nil, nil +} + +// Watch retrieves changes that occur on the server to the specified resource. +// It currently supports watching a single source - if the resource source +// (selectors or pure types) can be watched, they will be, otherwise the list +// will be visited (equivalent to the Infos() call) and if there is a single +// resource present, it will be watched, otherwise an error will be returned. +func (r *Result) Watch(resourceVersion string) (watch.Interface, error) { + if r.err != nil { + return nil, r.err + } + if len(r.sources) != 1 { + return nil, fmt.Errorf("you may only watch a single resource or type of resource at a time") + } + w, ok := r.sources[0].(Watchable) + if !ok { + info, err := r.Infos() + if err != nil { + return nil, err + } + if len(info) != 1 { + return nil, fmt.Errorf("watch is only supported on individual resources and resource collections - %d resources were found", len(info)) + } + return info[0].Watch(resourceVersion) + } + return w.Watch(resourceVersion) +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/scheme.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/scheme.go new file mode 100644 index 0000000000..fef6edfc1b --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/scheme.go @@ -0,0 +1,79 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "encoding/json" + "io" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" +) + +// dynamicCodec is a codec that wraps the standard unstructured codec +// with special handling for Status objects. +// Deprecated only used by test code and its wrong +type dynamicCodec struct{} + +func (dynamicCodec) Decode(data []byte, gvk *schema.GroupVersionKind, obj runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + obj, gvk, err := unstructured.UnstructuredJSONScheme.Decode(data, gvk, obj) + if err != nil { + return nil, nil, err + } + + if _, ok := obj.(*metav1.Status); !ok && strings.ToLower(gvk.Kind) == "status" { + obj = &metav1.Status{} + err := json.Unmarshal(data, obj) + if err != nil { + return nil, nil, err + } + } + + return obj, gvk, nil +} + +func (dynamicCodec) Encode(obj runtime.Object, w io.Writer) error { + return unstructured.UnstructuredJSONScheme.Encode(obj, w) +} + +// ContentConfig returns a rest.ContentConfig for dynamic types. It includes enough codecs to act as a "normal" +// serializer for the rest.client with options, status and the like. 
+func UnstructuredPlusDefaultContentConfig() rest.ContentConfig { + var jsonInfo runtime.SerializerInfo + // TODO: scheme.Codecs here should become "pkg/apis/server/scheme" which is the minimal core you need + // to talk to a kubernetes server + for _, info := range scheme.Codecs.SupportedMediaTypes() { + if info.MediaType == runtime.ContentTypeJSON { + jsonInfo = info + break + } + } + + jsonInfo.Serializer = dynamicCodec{} + jsonInfo.PrettySerializer = nil + return rest.ContentConfig{ + AcceptContentTypes: runtime.ContentTypeJSON, + ContentType: runtime.ContentTypeJSON, + NegotiatedSerializer: serializer.NegotiatedSerializerWrapper(jsonInfo), + } +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/selector.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/selector.go new file mode 100644 index 0000000000..f36508bd4a --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/selector.go @@ -0,0 +1,121 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" +) + +// Selector is a Visitor for resources that match a label selector. 
+type Selector struct { + Client RESTClient + Mapping *meta.RESTMapping + Namespace string + LabelSelector string + FieldSelector string + Export bool + IncludeUninitialized bool + LimitChunks int64 +} + +// NewSelector creates a resource selector which hides details of getting items by their label selector. +func NewSelector(client RESTClient, mapping *meta.RESTMapping, namespace, labelSelector, fieldSelector string, export, includeUninitialized bool, limitChunks int64) *Selector { + return &Selector{ + Client: client, + Mapping: mapping, + Namespace: namespace, + LabelSelector: labelSelector, + FieldSelector: fieldSelector, + Export: export, + IncludeUninitialized: includeUninitialized, + LimitChunks: limitChunks, + } +} + +// Visit implements Visitor and uses request chunking by default. +func (r *Selector) Visit(fn VisitorFunc) error { + var continueToken string + for { + list, err := NewHelper(r.Client, r.Mapping).List( + r.Namespace, + r.ResourceMapping().GroupVersionKind.GroupVersion().String(), + r.Export, + &metav1.ListOptions{ + LabelSelector: r.LabelSelector, + FieldSelector: r.FieldSelector, + IncludeUninitialized: r.IncludeUninitialized, + Limit: r.LimitChunks, + Continue: continueToken, + }, + ) + if err != nil { + if errors.IsResourceExpired(err) { + return err + } + if errors.IsBadRequest(err) || errors.IsNotFound(err) { + if se, ok := err.(*errors.StatusError); ok { + // modify the message without hiding this is an API error + if len(r.LabelSelector) == 0 && len(r.FieldSelector) == 0 { + se.ErrStatus.Message = fmt.Sprintf("Unable to list %q: %v", r.Mapping.Resource, se.ErrStatus.Message) + } else { + se.ErrStatus.Message = fmt.Sprintf("Unable to find %q that match label selector %q, field selector %q: %v", r.Mapping.Resource, r.LabelSelector, r.FieldSelector, se.ErrStatus.Message) + } + return se + } + if len(r.LabelSelector) == 0 && len(r.FieldSelector) == 0 { + return fmt.Errorf("Unable to list %q: %v", r.Mapping.Resource, err) + } + return 
fmt.Errorf("Unable to find %q that match label selector %q, field selector %q: %v", r.Mapping.Resource, r.LabelSelector, r.FieldSelector, err) + } + return err + } + resourceVersion, _ := metadataAccessor.ResourceVersion(list) + nextContinueToken, _ := metadataAccessor.Continue(list) + info := &Info{ + Client: r.Client, + Mapping: r.Mapping, + + Namespace: r.Namespace, + ResourceVersion: resourceVersion, + + Object: list, + } + + if err := fn(info, nil); err != nil { + return err + } + if len(nextContinueToken) == 0 { + return nil + } + continueToken = nextContinueToken + } +} + +func (r *Selector) Watch(resourceVersion string) (watch.Interface, error) { + return NewHelper(r.Client, r.Mapping).Watch(r.Namespace, r.ResourceMapping().GroupVersionKind.GroupVersion().String(), + &metav1.ListOptions{ResourceVersion: resourceVersion, LabelSelector: r.LabelSelector, FieldSelector: r.FieldSelector}) +} + +// ResourceMapping returns the mapping for this resource and implements ResourceMapping +func (r *Selector) ResourceMapping() *meta.RESTMapping { + return r.Mapping +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/visitor.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/visitor.go new file mode 100644 index 0000000000..32c1a691a5 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/resource/visitor.go @@ -0,0 +1,723 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resource + +import ( + "bytes" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path/filepath" + "time" + + "golang.org/x/text/encoding/unicode" + "golang.org/x/text/transform" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/apimachinery/pkg/watch" +) + +const ( + constSTDINstr string = "STDIN" + stopValidateMessage = "if you choose to ignore these errors, turn validation off with --validate=false" +) + +// Watchable describes a resource that can be watched for changes that occur on the server, +// beginning after the provided resource version. +type Watchable interface { + Watch(resourceVersion string) (watch.Interface, error) +} + +// ResourceMapping allows an object to return the resource mapping associated with +// the resource or resources it represents. +type ResourceMapping interface { + ResourceMapping() *meta.RESTMapping +} + +// Info contains temporary info to execute a REST call, or show the results +// of an already completed REST call. +type Info struct { + // Client will only be present if this builder was not local + Client RESTClient + // Mapping will only be present if this builder was not local + Mapping *meta.RESTMapping + + // Namespace will be set if the object is namespaced and has a specified value. + Namespace string + Name string + + // Optional, Source is the filename or URL to template file (.json or .yaml), + // or stdin to use to handle the resource + Source string + // Optional, this is the most recent value returned by the server if available. It will + // typically be in unstructured or internal forms, depending on how the Builder was + // defined. 
If retrieved from the server, the Builder expects the mapping client to + // decide the final form. Use the AsVersioned, AsUnstructured, and AsInternal helpers + // to alter the object versions. + Object runtime.Object + // Optional, this is the most recent resource version the server knows about for + // this type of resource. It may not match the resource version of the object, + // but if set it should be equal to or newer than the resource version of the + // object (however the server defines resource version). + ResourceVersion string + // Optional, should this resource be exported, stripped of cluster-specific and instance specific fields + Export bool +} + +// Visit implements Visitor +func (i *Info) Visit(fn VisitorFunc) error { + return fn(i, nil) +} + +// Get retrieves the object from the Namespace and Name fields +func (i *Info) Get() (err error) { + obj, err := NewHelper(i.Client, i.Mapping).Get(i.Namespace, i.Name, i.Export) + if err != nil { + if errors.IsNotFound(err) && len(i.Namespace) > 0 && i.Namespace != metav1.NamespaceDefault && i.Namespace != metav1.NamespaceAll { + err2 := i.Client.Get().AbsPath("api", "v1", "namespaces", i.Namespace).Do().Error() + if err2 != nil && errors.IsNotFound(err2) { + return err2 + } + } + return err + } + i.Object = obj + i.ResourceVersion, _ = metadataAccessor.ResourceVersion(obj) + return nil +} + +// Refresh updates the object with another object. If ignoreError is set +// the Object will be updated even if name, namespace, or resourceVersion +// attributes cannot be loaded from the object. 
+func (i *Info) Refresh(obj runtime.Object, ignoreError bool) error { + name, err := metadataAccessor.Name(obj) + if err != nil { + if !ignoreError { + return err + } + } else { + i.Name = name + } + namespace, err := metadataAccessor.Namespace(obj) + if err != nil { + if !ignoreError { + return err + } + } else { + i.Namespace = namespace + } + version, err := metadataAccessor.ResourceVersion(obj) + if err != nil { + if !ignoreError { + return err + } + } else { + i.ResourceVersion = version + } + i.Object = obj + return nil +} + +// String returns the general purpose string representation +func (i *Info) String() string { + basicInfo := fmt.Sprintf("Name: %q, Namespace: %q\nObject: %+q", i.Name, i.Namespace, i.Object) + if i.Mapping != nil { + mappingInfo := fmt.Sprintf("Resource: %q, GroupVersionKind: %q", i.Mapping.Resource.String(), + i.Mapping.GroupVersionKind.String()) + return fmt.Sprint(mappingInfo, "\n", basicInfo) + } + return basicInfo +} + +// Namespaced returns true if the object belongs to a namespace +func (i *Info) Namespaced() bool { + return i.Mapping != nil && i.Mapping.Scope.Name() == meta.RESTScopeNameNamespace +} + +// Watch returns server changes to this object after it was retrieved. +func (i *Info) Watch(resourceVersion string) (watch.Interface, error) { + return NewHelper(i.Client, i.Mapping).WatchSingle(i.Namespace, i.Name, resourceVersion) +} + +// ResourceMapping returns the mapping for this resource and implements ResourceMapping +func (i *Info) ResourceMapping() *meta.RESTMapping { + return i.Mapping +} + +// VisitorList implements Visit for the sub visitors it contains. The first error +// returned from a child Visitor will terminate iteration. +type VisitorList []Visitor + +// Visit implements Visitor +func (l VisitorList) Visit(fn VisitorFunc) error { + for i := range l { + if err := l[i].Visit(fn); err != nil { + return err + } + } + return nil +} + +// EagerVisitorList implements Visit for the sub visitors it contains. 
All errors +// will be captured and returned at the end of iteration. +type EagerVisitorList []Visitor + +// Visit implements Visitor, and gathers errors that occur during processing until +// all sub visitors have been visited. +func (l EagerVisitorList) Visit(fn VisitorFunc) error { + errs := []error(nil) + for i := range l { + if err := l[i].Visit(func(info *Info, err error) error { + if err != nil { + errs = append(errs, err) + return nil + } + if err := fn(info, nil); err != nil { + errs = append(errs, err) + } + return nil + }); err != nil { + errs = append(errs, err) + } + } + return utilerrors.NewAggregate(errs) +} + +func ValidateSchema(data []byte, schema ContentValidator) error { + if schema == nil { + return nil + } + if err := schema.ValidateBytes(data); err != nil { + return fmt.Errorf("error validating data: %v; %s", err, stopValidateMessage) + } + return nil +} + +// URLVisitor downloads the contents of a URL, and if successful, returns +// an info object representing the downloaded object. +type URLVisitor struct { + URL *url.URL + *StreamVisitor + HttpAttemptCount int +} + +func (v *URLVisitor) Visit(fn VisitorFunc) error { + body, err := readHttpWithRetries(httpgetImpl, time.Second, v.URL.String(), v.HttpAttemptCount) + if err != nil { + return err + } + defer body.Close() + v.StreamVisitor.Reader = body + return v.StreamVisitor.Visit(fn) +} + +// readHttpWithRetries tries to http.Get the v.URL retries times before giving up. 
+func readHttpWithRetries(get httpget, duration time.Duration, u string, attempts int) (io.ReadCloser, error) { + var err error + var body io.ReadCloser + if attempts <= 0 { + return nil, fmt.Errorf("http attempts must be greater than 0, was %d", attempts) + } + for i := 0; i < attempts; i++ { + var statusCode int + var status string + if i > 0 { + time.Sleep(duration) + } + + // Try to get the URL + statusCode, status, body, err = get(u) + + // Retry Errors + if err != nil { + continue + } + + // Error - Set the error condition from the StatusCode + if statusCode != http.StatusOK { + err = fmt.Errorf("unable to read URL %q, server reported %s, status code=%d", u, status, statusCode) + } + + if statusCode >= 500 && statusCode < 600 { + // Retry 500's + continue + } else { + // Don't retry other StatusCodes + break + } + } + return body, err +} + +// httpget Defines function to retrieve a url and return the results. Exists for unit test stubbing. +type httpget func(url string) (int, string, io.ReadCloser, error) + +// httpgetImpl Implements a function to retrieve a url and return the results. +func httpgetImpl(url string) (int, string, io.ReadCloser, error) { + resp, err := http.Get(url) + if err != nil { + return 0, "", nil, err + } + return resp.StatusCode, resp.Status, resp.Body, nil +} + +// DecoratedVisitor will invoke the decorators in order prior to invoking the visitor function +// passed to Visit. An error will terminate the visit. +type DecoratedVisitor struct { + visitor Visitor + decorators []VisitorFunc +} + +// NewDecoratedVisitor will create a visitor that invokes the provided visitor functions before +// the user supplied visitor function is invoked, giving them the opportunity to mutate the Info +// object or terminate early with an error. 
+func NewDecoratedVisitor(v Visitor, fn ...VisitorFunc) Visitor { + if len(fn) == 0 { + return v + } + return DecoratedVisitor{v, fn} +} + +// Visit implements Visitor +func (v DecoratedVisitor) Visit(fn VisitorFunc) error { + return v.visitor.Visit(func(info *Info, err error) error { + if err != nil { + return err + } + for i := range v.decorators { + if err := v.decorators[i](info, nil); err != nil { + return err + } + } + return fn(info, nil) + }) +} + +// ContinueOnErrorVisitor visits each item and, if an error occurs on +// any individual item, returns an aggregate error after all items +// are visited. +type ContinueOnErrorVisitor struct { + Visitor +} + +// Visit returns nil if no error occurs during traversal, a regular +// error if one occurs, or if multiple errors occur, an aggregate +// error. If the provided visitor fails on any individual item it +// will not prevent the remaining items from being visited. An error +// returned by the visitor directly may still result in some items +// not being visited. +func (v ContinueOnErrorVisitor) Visit(fn VisitorFunc) error { + errs := []error{} + err := v.Visitor.Visit(func(info *Info, err error) error { + if err != nil { + errs = append(errs, err) + return nil + } + if err := fn(info, nil); err != nil { + errs = append(errs, err) + } + return nil + }) + if err != nil { + errs = append(errs, err) + } + if len(errs) == 1 { + return errs[0] + } + return utilerrors.NewAggregate(errs) +} + +// FlattenListVisitor flattens any objects that runtime.ExtractList recognizes as a list +// - has an "Items" public field that is a slice of runtime.Objects or objects satisfying +// that interface - into multiple Infos. An error on any sub item (for instance, if a List +// contains an object that does not have a registered client or resource) will terminate +// the visit. +// TODO: allow errors to be aggregated? 
+type FlattenListVisitor struct { + visitor Visitor + typer runtime.ObjectTyper + mapper *mapper +} + +// NewFlattenListVisitor creates a visitor that will expand list style runtime.Objects +// into individual items and then visit them individually. +func NewFlattenListVisitor(v Visitor, typer runtime.ObjectTyper, mapper *mapper) Visitor { + return FlattenListVisitor{v, typer, mapper} +} + +func (v FlattenListVisitor) Visit(fn VisitorFunc) error { + return v.visitor.Visit(func(info *Info, err error) error { + if err != nil { + return err + } + if info.Object == nil { + return fn(info, nil) + } + if !meta.IsListType(info.Object) { + return fn(info, nil) + } + + items := []runtime.Object{} + itemsToProcess := []runtime.Object{info.Object} + + for i := 0; i < len(itemsToProcess); i++ { + currObj := itemsToProcess[i] + if !meta.IsListType(currObj) { + items = append(items, currObj) + continue + } + + currItems, err := meta.ExtractList(currObj) + if err != nil { + return err + } + if errs := runtime.DecodeList(currItems, v.mapper.decoder); len(errs) > 0 { + return utilerrors.NewAggregate(errs) + } + itemsToProcess = append(itemsToProcess, currItems...) 
+ } + + // If we have a GroupVersionKind on the list, prioritize that when asking for info on the objects contained in the list + var preferredGVKs []schema.GroupVersionKind + if info.Mapping != nil && !info.Mapping.GroupVersionKind.Empty() { + preferredGVKs = append(preferredGVKs, info.Mapping.GroupVersionKind) + } + + for i := range items { + item, err := v.mapper.infoForObject(items[i], v.typer, preferredGVKs) + if err != nil { + return err + } + if len(info.ResourceVersion) != 0 { + item.ResourceVersion = info.ResourceVersion + } + if err := fn(item, nil); err != nil { + return err + } + } + return nil + }) +} + +func ignoreFile(path string, extensions []string) bool { + if len(extensions) == 0 { + return false + } + ext := filepath.Ext(path) + for _, s := range extensions { + if s == ext { + return false + } + } + return true +} + +// FileVisitorForSTDIN return a special FileVisitor just for STDIN +func FileVisitorForSTDIN(mapper *mapper, schema ContentValidator) Visitor { + return &FileVisitor{ + Path: constSTDINstr, + StreamVisitor: NewStreamVisitor(nil, mapper, constSTDINstr, schema), + } +} + +// ExpandPathsToFileVisitors will return a slice of FileVisitors that will handle files from the provided path. +// After FileVisitors open the files, they will pass an io.Reader to a StreamVisitor to do the reading. (stdin +// is also taken care of). 
Paths argument also accepts a single file, and will return a single visitor +func ExpandPathsToFileVisitors(mapper *mapper, paths string, recursive bool, extensions []string, schema ContentValidator) ([]Visitor, error) { + var visitors []Visitor + err := filepath.Walk(paths, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + if fi.IsDir() { + if path != paths && !recursive { + return filepath.SkipDir + } + return nil + } + // Don't check extension if the filepath was passed explicitly + if path != paths && ignoreFile(path, extensions) { + return nil + } + + visitor := &FileVisitor{ + Path: path, + StreamVisitor: NewStreamVisitor(nil, mapper, path, schema), + } + + visitors = append(visitors, visitor) + return nil + }) + + if err != nil { + return nil, err + } + return visitors, nil +} + +// FileVisitor is wrapping around a StreamVisitor, to handle open/close files +type FileVisitor struct { + Path string + *StreamVisitor +} + +// Visit in a FileVisitor is just taking care of opening/closing files +func (v *FileVisitor) Visit(fn VisitorFunc) error { + var f *os.File + if v.Path == constSTDINstr { + f = os.Stdin + } else { + var err error + f, err = os.Open(v.Path) + if err != nil { + return err + } + defer f.Close() + } + + // TODO: Consider adding a flag to force to UTF16, apparently some + // Windows tools don't write the BOM + utf16bom := unicode.BOMOverride(unicode.UTF8.NewDecoder()) + v.StreamVisitor.Reader = transform.NewReader(f, utf16bom) + + return v.StreamVisitor.Visit(fn) +} + +// StreamVisitor reads objects from an io.Reader and walks them. A stream visitor can only be +// visited once. +// TODO: depends on objects being in JSON format before being passed to decode - need to implement +// a stream decoder method on runtime.Codec to properly handle this. 
+type StreamVisitor struct { + io.Reader + *mapper + + Source string + Schema ContentValidator +} + +// NewStreamVisitor is a helper function that is useful when we want to change the fields of the struct but keep calls the same. +func NewStreamVisitor(r io.Reader, mapper *mapper, source string, schema ContentValidator) *StreamVisitor { + return &StreamVisitor{ + Reader: r, + mapper: mapper, + Source: source, + Schema: schema, + } +} + +// Visit implements Visitor over a stream. StreamVisitor is able to distinct multiple resources in one stream. +func (v *StreamVisitor) Visit(fn VisitorFunc) error { + d := yaml.NewYAMLOrJSONDecoder(v.Reader, 4096) + for { + ext := runtime.RawExtension{} + if err := d.Decode(&ext); err != nil { + if err == io.EOF { + return nil + } + return fmt.Errorf("error parsing %s: %v", v.Source, err) + } + // TODO: This needs to be able to handle object in other encodings and schemas. + ext.Raw = bytes.TrimSpace(ext.Raw) + if len(ext.Raw) == 0 || bytes.Equal(ext.Raw, []byte("null")) { + continue + } + if err := ValidateSchema(ext.Raw, v.Schema); err != nil { + return fmt.Errorf("error validating %q: %v", v.Source, err) + } + info, err := v.infoForData(ext.Raw, v.Source) + if err != nil { + if fnErr := fn(info, err); fnErr != nil { + return fnErr + } + continue + } + if err := fn(info, nil); err != nil { + return err + } + } +} + +func UpdateObjectNamespace(info *Info, err error) error { + if err != nil { + return err + } + if info.Object != nil { + return metadataAccessor.SetNamespace(info.Object, info.Namespace) + } + return nil +} + +// FilterNamespace omits the namespace if the object is not namespace scoped +func FilterNamespace(info *Info, err error) error { + if err != nil { + return err + } + if !info.Namespaced() { + info.Namespace = "" + UpdateObjectNamespace(info, nil) + } + return nil +} + +// SetNamespace ensures that every Info object visited will have a namespace +// set. If info.Object is set, it will be mutated as well. 
+func SetNamespace(namespace string) VisitorFunc { + return func(info *Info, err error) error { + if err != nil { + return err + } + if !info.Namespaced() { + return nil + } + if len(info.Namespace) == 0 { + info.Namespace = namespace + UpdateObjectNamespace(info, nil) + } + return nil + } +} + +// RequireNamespace will either set a namespace if none is provided on the +// Info object, or if the namespace is set and does not match the provided +// value, returns an error. This is intended to guard against administrators +// accidentally operating on resources outside their namespace. +func RequireNamespace(namespace string) VisitorFunc { + return func(info *Info, err error) error { + if err != nil { + return err + } + if !info.Namespaced() { + return nil + } + if len(info.Namespace) == 0 { + info.Namespace = namespace + UpdateObjectNamespace(info, nil) + return nil + } + if info.Namespace != namespace { + return fmt.Errorf("the namespace from the provided object %q does not match the namespace %q. You must pass '--namespace=%s' to perform this operation.", info.Namespace, namespace, info.Namespace) + } + return nil + } +} + +// RetrieveLatest updates the Object on each Info by invoking a standard client +// Get. +func RetrieveLatest(info *Info, err error) error { + if err != nil { + return err + } + if meta.IsListType(info.Object) { + return fmt.Errorf("watch is only supported on individual resources and resource collections, but a list of resources is found") + } + if len(info.Name) == 0 { + return nil + } + if info.Namespaced() && len(info.Namespace) == 0 { + return fmt.Errorf("no namespace set on resource %s %q", info.Mapping.Resource, info.Name) + } + return info.Get() +} + +// RetrieveLazy updates the object if it has not been loaded yet. 
+func RetrieveLazy(info *Info, err error) error { + if err != nil { + return err + } + if info.Object == nil { + return info.Get() + } + return nil +} + +// CreateAndRefresh creates an object from input info and refreshes info with that object +func CreateAndRefresh(info *Info) error { + obj, err := NewHelper(info.Client, info.Mapping).Create(info.Namespace, true, info.Object, nil) + if err != nil { + return err + } + info.Refresh(obj, true) + return nil +} + +type FilterFunc func(info *Info, err error) (bool, error) + +type FilteredVisitor struct { + visitor Visitor + filters []FilterFunc +} + +func NewFilteredVisitor(v Visitor, fn ...FilterFunc) Visitor { + if len(fn) == 0 { + return v + } + return FilteredVisitor{v, fn} +} + +func (v FilteredVisitor) Visit(fn VisitorFunc) error { + return v.visitor.Visit(func(info *Info, err error) error { + if err != nil { + return err + } + for _, filter := range v.filters { + ok, err := filter(info, nil) + if err != nil { + return err + } + if !ok { + return nil + } + } + return fn(info, nil) + }) +} + +func FilterByLabelSelector(s labels.Selector) FilterFunc { + return func(info *Info, err error) (bool, error) { + if err != nil { + return false, err + } + a, err := meta.Accessor(info.Object) + if err != nil { + return false, err + } + if !s.Matches(labels.Set(a.GetLabels())) { + return false, nil + } + return true, nil + } +} + +type InfoListVisitor []*Info + +func (infos InfoListVisitor) Visit(fn VisitorFunc) error { + var err error + for _, i := range infos { + err = fn(i, err) + } + return err +} diff --git a/vendor/k8s.io/cli-runtime/pkg/genericclioptions/template_flags.go b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/template_flags.go new file mode 100644 index 0000000000..08954b2417 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericclioptions/template_flags.go @@ -0,0 +1,135 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package genericclioptions + +import ( + "fmt" + "io/ioutil" + "sort" + "strings" + + "github.com/spf13/cobra" + + "k8s.io/cli-runtime/pkg/genericclioptions/printers" +) + +// templates are logically optional for specifying a format. +// this allows a user to specify a template format value +// as --output=go-template= +var templateFormats = map[string]bool{ + "template": true, + "go-template": true, + "go-template-file": true, + "templatefile": true, +} + +// GoTemplatePrintFlags provides default flags necessary for template printing. +// Given the following flag values, a printer can be requested that knows +// how to handle printing based on these values. +type GoTemplatePrintFlags struct { + // indicates if it is OK to ignore missing keys for rendering + // an output template. + AllowMissingKeys *bool + TemplateArgument *string +} + +func (f *GoTemplatePrintFlags) AllowedFormats() []string { + formats := make([]string, 0, len(templateFormats)) + for format := range templateFormats { + formats = append(formats, format) + } + sort.Strings(formats) + return formats +} + +// ToPrinter receives an templateFormat and returns a printer capable of +// handling --template format printing. +// Returns false if the specified templateFormat does not match a template format. 
+func (f *GoTemplatePrintFlags) ToPrinter(templateFormat string) (printers.ResourcePrinter, error) { + if (f.TemplateArgument == nil || len(*f.TemplateArgument) == 0) && len(templateFormat) == 0 { + return nil, NoCompatiblePrinterError{Options: f, OutputFormat: &templateFormat} + } + + templateValue := "" + + if f.TemplateArgument == nil || len(*f.TemplateArgument) == 0 { + for format := range templateFormats { + format = format + "=" + if strings.HasPrefix(templateFormat, format) { + templateValue = templateFormat[len(format):] + templateFormat = format[:len(format)-1] + break + } + } + } else { + templateValue = *f.TemplateArgument + } + + if _, supportedFormat := templateFormats[templateFormat]; !supportedFormat { + return nil, NoCompatiblePrinterError{OutputFormat: &templateFormat, AllowedFormats: f.AllowedFormats()} + } + + if len(templateValue) == 0 { + return nil, fmt.Errorf("template format specified but no template given") + } + + if templateFormat == "templatefile" || templateFormat == "go-template-file" { + data, err := ioutil.ReadFile(templateValue) + if err != nil { + return nil, fmt.Errorf("error reading --template %s, %v\n", templateValue, err) + } + + templateValue = string(data) + } + + p, err := printers.NewGoTemplatePrinter([]byte(templateValue)) + if err != nil { + return nil, fmt.Errorf("error parsing template %s, %v\n", templateValue, err) + } + + allowMissingKeys := true + if f.AllowMissingKeys != nil { + allowMissingKeys = *f.AllowMissingKeys + } + + p.AllowMissingKeys(allowMissingKeys) + return p, nil +} + +// AddFlags receives a *cobra.Command reference and binds +// flags related to template printing to it +func (f *GoTemplatePrintFlags) AddFlags(c *cobra.Command) { + if f.TemplateArgument != nil { + c.Flags().StringVar(f.TemplateArgument, "template", *f.TemplateArgument, "Template string or path to template file to use when -o=go-template, -o=go-template-file. 
The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].") + c.MarkFlagFilename("template") + } + if f.AllowMissingKeys != nil { + c.Flags().BoolVar(f.AllowMissingKeys, "allow-missing-template-keys", *f.AllowMissingKeys, "If true, ignore any errors in templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats.") + } +} + +// NewGoTemplatePrintFlags returns flags associated with +// --template printing, with default values set. +func NewGoTemplatePrintFlags() *GoTemplatePrintFlags { + allowMissingKeysPtr := true + templateValuePtr := "" + + return &GoTemplatePrintFlags{ + TemplateArgument: &templateValuePtr, + AllowMissingKeys: &allowMissingKeysPtr, + } +} diff --git a/vendor/k8s.io/client-go/.travis.yml b/vendor/k8s.io/client-go/.travis.yml index 6b2b4c3058..1aed8db1ff 100644 --- a/vendor/k8s.io/client-go/.travis.yml +++ b/vendor/k8s.io/client-go/.travis.yml @@ -3,8 +3,7 @@ language: go go_import_path: k8s.io/client-go go: - - 1.11.1 + - 1.11.2 script: - - if [ "$TRAVIS_BRANCH" != "master" ]; then godep restore; fi - - go build ./... +- go build ./... diff --git a/vendor/k8s.io/client-go/CHANGELOG.md b/vendor/k8s.io/client-go/CHANGELOG.md index 667bfacdfe..1c27fa08cf 100644 --- a/vendor/k8s.io/client-go/CHANGELOG.md +++ b/vendor/k8s.io/client-go/CHANGELOG.md @@ -5,6 +5,114 @@ https://github.com/kubernetes/test-infra/issues/5843. Changes in `k8s.io/api` and `k8s.io/apimachinery` are mentioned here because `k8s.io/client-go` depends on them. +# v10.0.0 + +**Breaking Changes:** + +* Action required: client-go will no longer have bootstrap +(`k8s.io/client-go/tools/bootstrap`) related code. Any reference to it will +break. Please redirect all references to `k8s.io/bootstrap` instead. 
+([#67356](https://github.com/kubernetes/kubernetes/pull/67356)) + +* The methods `NewSelfSignedCACert` and `NewSignedCert` now use `crypto.Signer` +interface instead of `rsa.PrivateKey` for certificate creation. This is done +to allow different kind of private keys (for example: ecdsa). +([#69329](https://github.com/kubernetes/kubernetes/pull/69329)) + +* `GetScale` and `UpdateScale` methods have been added for `apps/v1` clients +and with this, no-verb scale clients have been removed. +([#70437](https://github.com/kubernetes/kubernetes/pull/70437)) + +* `k8s.io/client-go/util/cert/triple` package has been removed. +([#70966](https://github.com/kubernetes/kubernetes/pull/70966)) + +**New Features:** + +* `unfinished_work_microseconds` is added to the workqueue metrics. +It can be used to detect stuck worker threads (kube-controller-manager runs many workqueues.). +([#70884](https://github.com/kubernetes/kubernetes/pull/70884)) + +* A method `GetPorts` is added to expose the ports that were forwarded. +This can be used to retrieve the locally-bound port in cases where the input was port 0. +([#67513](https://github.com/kubernetes/kubernetes/pull/67513)) + +* Dynamic listers and informers, that work with `runtime.Unstructured` objects, +are added. These are useful for writing generic, non-generated controllers. +([68748](https://github.com/kubernetes/kubernetes/pull/68748)) + +* The dynamic fake client now supports JSONPatch. +([#69330](https://github.com/kubernetes/kubernetes/pull/69330)) + +* The `GetBinding` method is added for pods in the fake client. +([#69412](https://github.com/kubernetes/kubernetes/pull/69412)) + +**Bug fixes and Improvements:** + +* The `apiVersion` and action name values for fake evictions are now set. +([#69035](https://github.com/kubernetes/kubernetes/pull/69035)) + +* PEM files containing both TLS certificate and key can now be parsed in +arbitrary order. 
Previously key was always required to be first. +([#69536](https://github.com/kubernetes/kubernetes/pull/69536)) + +* Go clients created from a kubeconfig that specifies a `TokenFile` now +periodically reload the token from the specified file. +([#70606](https://github.com/kubernetes/kubernetes/pull/70606)) + +* It is now ensured that oversized data frames are not written to +spdystreams in `remotecommand.NewSPDYExecutor`. +([#70999](https://github.com/kubernetes/kubernetes/pull/70999)) + +* A panic occuring on calling `scheme.Convert` is fixed by populating the fake +dynamic client scheme. ([#69125](https://github.com/kubernetes/kubernetes/pull/69125)) + +* Add step to correctly setup permissions for the in-cluster-client-configuration example. +([#69232](https://github.com/kubernetes/kubernetes/pull/69232)) + +* The function `Parallelize` is deprecated. Use `ParallelizeUntil` instead. +([#68403](https://github.com/kubernetes/kubernetes/pull/68403)) + +* [k8s.io/apimachinery] Restrict redirect following from the apiserver to +same-host redirects, and ignore redirects in some cases. +([#66516](https://github.com/kubernetes/kubernetes/pull/66516)) + +## API changes + +**New Features:** + +* GlusterFS PersistentVolumes sources can now reference endpoints in any +namespace using the `spec.glusterfs.endpointsNamespace` field. +Ensure all kubelets are upgraded to 1.13+ before using this capability. +([#60195](https://github.com/kubernetes/kubernetes/pull/60195)) + +* The [dynamic audit configuration](https://github.com/kubernetes/community/blob/master/keps/sig-auth/0014-dynamic-audit-configuration.md) +API is added. ([#67547](https://github.com/kubernetes/kubernetes/pull/67547)) + +* A new field `EnableServiceLinks` is added to the `PodSpec` to indicate whether +information about services should be injected into pod's environment variables. 
+([#68754](https://github.com/kubernetes/kubernetes/pull/68754)) + +* `CSIPersistentVolume` feature, i.e. `PersistentVolumes` with +`CSIPersistentVolumeSource`, is GA. `CSIPersistentVolume` feature gate is now +deprecated and will be removed according to deprecation policy. +([#69929](https://github.com/kubernetes/kubernetes/pull/69929)) + +* Raw block volume support is promoted to beta, and enabled by default. +This is accessible via the `volumeDevices` container field in pod specs, +and the `volumeMode` field in persistent volume and persistent volume claims definitions. +([#71167](https://github.com/kubernetes/kubernetes/pull/71167)) + +**Bug fixes and Improvements:** + +* The default value of extensions/v1beta1 Deployment's `RevisionHistoryLimit` +is set to `MaxInt32`. ([#66605](https://github.com/kubernetes/kubernetes/pull/66605)) + +* `procMount` field is no longer incorrectly marked as required in openapi schema. +([#69694](https://github.com/kubernetes/kubernetes/pull/69694)) + +* The caBundle and service fields in admission webhook API objects now correctly +indicate they are optional. ([#70138](https://github.com/kubernetes/kubernetes/pull/70138)) + # v9.0.0 **Breaking Changes:** diff --git a/vendor/k8s.io/client-go/README.md b/vendor/k8s.io/client-go/README.md index 00be37999d..0530d9c531 100644 --- a/vendor/k8s.io/client-go/README.md +++ b/vendor/k8s.io/client-go/README.md @@ -2,7 +2,7 @@ Go clients for talking to a [kubernetes](http://kubernetes.io/) cluster. -We currently recommend using the v9.0.0 tag. See [INSTALL.md](/INSTALL.md) for +We currently recommend using the v10.0.0 tag. See [INSTALL.md](/INSTALL.md) for detailed installation instructions. `go get k8s.io/client-go/...` works, but will build `master`, which doesn't handle the dependencies well. 
@@ -91,16 +91,16 @@ We will backport bugfixes--but not new features--into older versions of #### Compatibility matrix -| | Kubernetes 1.6 | Kubernetes 1.7 | Kubernetes 1.8 | Kubernetes 1.9 | Kubernetes 1.10 | Kubernetes 1.11 | Kubernetes 1.12 | -|---------------------|----------------|----------------|----------------|----------------|-----------------|-----------------|-----------------| -| client-go 3.0 | ✓ | - | +- | +- | +- | +- | +- | -| client-go 4.0 | +- | ✓ | +- | +- | +- | +- | +- | -| client-go 5.0 | +- | +- | ✓ | +- | +- | +- | +- | -| client-go 6.0 | +- | +- | +- | ✓ | +- | +- | +- | -| client-go 7.0 | +- | +- | +- | +- | ✓ | +- | +- | -| client-go 8.0 | +- | +- | +- | +- | +- | ✓ | +- | -| client-go 9.0 | +- | +- | +- | +- | +- | +- | ✓ | -| client-go HEAD | +- | +- | +- | +- | +- | +- | +- | +| | Kubernetes 1.7 | Kubernetes 1.8 | Kubernetes 1.9 | Kubernetes 1.10 | Kubernetes 1.11 | Kubernetes 1.12 | Kubernetes 1.13 | +|---------------------|----------------|----------------|----------------|-----------------|-----------------|-----------------|-----------------| +| client-go 4.0 | ✓ | +- | +- | +- | +- | +- | +- | +| client-go 5.0 | +- | ✓ | +- | +- | +- | +- | +- | +| client-go 6.0 | +- | +- | ✓ | +- | +- | +- | +- | +| client-go 7.0 | +- | +- | +- | ✓ | +- | +- | +- | +| client-go 8.0 | +- | +- | +- | +- | ✓ | +- | +- | +| client-go 9.0 | +- | +- | +- | +- | +- | ✓ | +- | +| client-go 10.0 | +- | +- | +- | +- | +- | +- | ✓ | +| client-go HEAD | +- | +- | +- | +- | +- | +- | +- | Key: @@ -128,9 +128,10 @@ between client-go versions. 
| client-go 4.0 | Kubernetes main repo, 1.7 branch | = - | | client-go 5.0 | Kubernetes main repo, 1.8 branch | = - | | client-go 6.0 | Kubernetes main repo, 1.9 branch | = - | -| client-go 7.0 | Kubernetes main repo, 1.10 branch | ✓ | +| client-go 7.0 | Kubernetes main repo, 1.10 branch | = - | | client-go 8.0 | Kubernetes main repo, 1.11 branch | ✓ | | client-go 9.0 | Kubernetes main repo, 1.12 branch | ✓ | +| client-go 10.0 | Kubernetes main repo, 1.13 branch | ✓ | | client-go HEAD | Kubernetes main repo, master branch | ✓ | Key: diff --git a/vendor/k8s.io/client-go/rest/config.go b/vendor/k8s.io/client-go/rest/config.go index 438eb3beda..072e7392b1 100644 --- a/vendor/k8s.io/client-go/rest/config.go +++ b/vendor/k8s.io/client-go/rest/config.go @@ -70,6 +70,11 @@ type Config struct { // TODO: demonstrate an OAuth2 compatible client. BearerToken string + // Path to a file containing a BearerToken. + // If set, the contents are periodically read. + // The last successfully read value takes precedence over BearerToken. + BearerTokenFile string + // Impersonate is the configuration that RESTClient will use for impersonation. Impersonate ImpersonationConfig @@ -322,9 +327,8 @@ func InClusterConfig() (*Config, error) { return nil, ErrNotInCluster } - ts := NewCachedFileTokenSource(tokenFile) - - if _, err := ts.Token(); err != nil { + token, err := ioutil.ReadFile(tokenFile) + if err != nil { return nil, err } @@ -340,7 +344,8 @@ func InClusterConfig() (*Config, error) { // TODO: switch to using cluster DNS. 
Host: "https://" + net.JoinHostPort(host, port), TLSClientConfig: tlsClientConfig, - WrapTransport: TokenSourceWrapTransport(ts), + BearerToken: string(token), + BearerTokenFile: tokenFile, }, nil } @@ -430,12 +435,13 @@ func AnonymousClientConfig(config *Config) *Config { // CopyConfig returns a copy of the given config func CopyConfig(config *Config) *Config { return &Config{ - Host: config.Host, - APIPath: config.APIPath, - ContentConfig: config.ContentConfig, - Username: config.Username, - Password: config.Password, - BearerToken: config.BearerToken, + Host: config.Host, + APIPath: config.APIPath, + ContentConfig: config.ContentConfig, + Username: config.Username, + Password: config.Password, + BearerToken: config.BearerToken, + BearerTokenFile: config.BearerTokenFile, Impersonate: ImpersonationConfig{ Groups: config.Impersonate.Groups, Extra: config.Impersonate.Extra, diff --git a/vendor/k8s.io/client-go/restmapper/category_expansion.go b/vendor/k8s.io/client-go/restmapper/category_expansion.go new file mode 100644 index 0000000000..1620bbcf81 --- /dev/null +++ b/vendor/k8s.io/client-go/restmapper/category_expansion.go @@ -0,0 +1,119 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restmapper + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" +) + +// CategoryExpander maps category strings to GroupResouces. +// Categories are classification or 'tag' of a group of resources. 
+type CategoryExpander interface { + Expand(category string) ([]schema.GroupResource, bool) +} + +// SimpleCategoryExpander implements CategoryExpander interface +// using a static mapping of categories to GroupResource mapping. +type SimpleCategoryExpander struct { + Expansions map[string][]schema.GroupResource +} + +// Expand fulfills CategoryExpander +func (e SimpleCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) { + ret, ok := e.Expansions[category] + return ret, ok +} + +// discoveryCategoryExpander struct lets a REST Client wrapper (discoveryClient) to retrieve list of APIResourceList, +// and then convert to fallbackExpander +type discoveryCategoryExpander struct { + discoveryClient discovery.DiscoveryInterface +} + +// NewDiscoveryCategoryExpander returns a category expander that makes use of the "categories" fields from +// the API, found through the discovery client. In case of any error or no category found (which likely +// means we're at a cluster prior to categories support, fallback to the expander provided. +func NewDiscoveryCategoryExpander(client discovery.DiscoveryInterface) CategoryExpander { + if client == nil { + panic("Please provide discovery client to shortcut expander") + } + return discoveryCategoryExpander{discoveryClient: client} +} + +// Expand fulfills CategoryExpander +func (e discoveryCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) { + // Get all supported resources for groups and versions from server, if no resource found, fallback anyway. 
+ apiResourceLists, _ := e.discoveryClient.ServerResources() + if len(apiResourceLists) == 0 { + return nil, false + } + + discoveredExpansions := map[string][]schema.GroupResource{} + for _, apiResourceList := range apiResourceLists { + gv, err := schema.ParseGroupVersion(apiResourceList.GroupVersion) + if err != nil { + continue + } + // Collect GroupVersions by categories + for _, apiResource := range apiResourceList.APIResources { + if categories := apiResource.Categories; len(categories) > 0 { + for _, category := range categories { + groupResource := schema.GroupResource{ + Group: gv.Group, + Resource: apiResource.Name, + } + discoveredExpansions[category] = append(discoveredExpansions[category], groupResource) + } + } + } + } + + ret, ok := discoveredExpansions[category] + return ret, ok +} + +// UnionCategoryExpander implements CategoryExpander interface. +// It maps given category string to union of expansions returned by all the CategoryExpanders in the list. +type UnionCategoryExpander []CategoryExpander + +// Expand fulfills CategoryExpander +func (u UnionCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) { + ret := []schema.GroupResource{} + ok := false + + // Expand the category for each CategoryExpander in the list and merge/combine the results. + for _, expansion := range u { + curr, currOk := expansion.Expand(category) + + for _, currGR := range curr { + found := false + for _, existing := range ret { + if existing == currGR { + found = true + break + } + } + if !found { + ret = append(ret, currGR) + } + } + ok = ok || currOk + } + + return ret, ok +} diff --git a/vendor/k8s.io/client-go/restmapper/discovery.go b/vendor/k8s.io/client-go/restmapper/discovery.go new file mode 100644 index 0000000000..84491f4c5d --- /dev/null +++ b/vendor/k8s.io/client-go/restmapper/discovery.go @@ -0,0 +1,339 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restmapper + +import ( + "fmt" + "strings" + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + + "k8s.io/klog" +) + +// APIGroupResources is an API group with a mapping of versions to +// resources. +type APIGroupResources struct { + Group metav1.APIGroup + // A mapping of version string to a slice of APIResources for + // that version. + VersionedResources map[string][]metav1.APIResource +} + +// NewDiscoveryRESTMapper returns a PriorityRESTMapper based on the discovered +// groups and resources passed in. +func NewDiscoveryRESTMapper(groupResources []*APIGroupResources) meta.RESTMapper { + unionMapper := meta.MultiRESTMapper{} + + var groupPriority []string + // /v1 is special. 
It should always come first + resourcePriority := []schema.GroupVersionResource{{Group: "", Version: "v1", Resource: meta.AnyResource}} + kindPriority := []schema.GroupVersionKind{{Group: "", Version: "v1", Kind: meta.AnyKind}} + + for _, group := range groupResources { + groupPriority = append(groupPriority, group.Group.Name) + + // Make sure the preferred version comes first + if len(group.Group.PreferredVersion.Version) != 0 { + preferred := group.Group.PreferredVersion.Version + if _, ok := group.VersionedResources[preferred]; ok { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{ + Group: group.Group.Name, + Version: group.Group.PreferredVersion.Version, + Resource: meta.AnyResource, + }) + + kindPriority = append(kindPriority, schema.GroupVersionKind{ + Group: group.Group.Name, + Version: group.Group.PreferredVersion.Version, + Kind: meta.AnyKind, + }) + } + } + + for _, discoveryVersion := range group.Group.Versions { + resources, ok := group.VersionedResources[discoveryVersion.Version] + if !ok { + continue + } + + // Add non-preferred versions after the preferred version, in case there are resources that only exist in those versions + if discoveryVersion.Version != group.Group.PreferredVersion.Version { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{ + Group: group.Group.Name, + Version: discoveryVersion.Version, + Resource: meta.AnyResource, + }) + + kindPriority = append(kindPriority, schema.GroupVersionKind{ + Group: group.Group.Name, + Version: discoveryVersion.Version, + Kind: meta.AnyKind, + }) + } + + gv := schema.GroupVersion{Group: group.Group.Name, Version: discoveryVersion.Version} + versionMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{gv}) + + for _, resource := range resources { + scope := meta.RESTScopeNamespace + if !resource.Namespaced { + scope = meta.RESTScopeRoot + } + + // if we have a slash, then this is a subresource and we shouldn't create mappings for those. 
+ if strings.Contains(resource.Name, "/") { + continue + } + + plural := gv.WithResource(resource.Name) + singular := gv.WithResource(resource.SingularName) + // this is for legacy resources and servers which don't list singular forms. For those we must still guess. + if len(resource.SingularName) == 0 { + _, singular = meta.UnsafeGuessKindToResource(gv.WithKind(resource.Kind)) + } + + versionMapper.AddSpecific(gv.WithKind(strings.ToLower(resource.Kind)), plural, singular, scope) + versionMapper.AddSpecific(gv.WithKind(resource.Kind), plural, singular, scope) + // TODO this is producing unsafe guesses that don't actually work, but it matches previous behavior + versionMapper.Add(gv.WithKind(resource.Kind+"List"), scope) + } + // TODO why is this type not in discovery (at least for "v1") + versionMapper.Add(gv.WithKind("List"), meta.RESTScopeRoot) + unionMapper = append(unionMapper, versionMapper) + } + } + + for _, group := range groupPriority { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{ + Group: group, + Version: meta.AnyVersion, + Resource: meta.AnyResource, + }) + kindPriority = append(kindPriority, schema.GroupVersionKind{ + Group: group, + Version: meta.AnyVersion, + Kind: meta.AnyKind, + }) + } + + return meta.PriorityRESTMapper{ + Delegate: unionMapper, + ResourcePriority: resourcePriority, + KindPriority: kindPriority, + } +} + +// GetAPIGroupResources uses the provided discovery client to gather +// discovery information and populate a slice of APIGroupResources. +func GetAPIGroupResources(cl discovery.DiscoveryInterface) ([]*APIGroupResources, error) { + apiGroups, err := cl.ServerGroups() + if err != nil { + if apiGroups == nil || len(apiGroups.Groups) == 0 { + return nil, err + } + // TODO track the errors and update callers to handle partial errors. 
+ } + var result []*APIGroupResources + for _, group := range apiGroups.Groups { + groupResources := &APIGroupResources{ + Group: group, + VersionedResources: make(map[string][]metav1.APIResource), + } + for _, version := range group.Versions { + resources, err := cl.ServerResourcesForGroupVersion(version.GroupVersion) + if err != nil { + // continue as best we can + // TODO track the errors and update callers to handle partial errors. + if resources == nil || len(resources.APIResources) == 0 { + continue + } + } + groupResources.VersionedResources[version.Version] = resources.APIResources + } + result = append(result, groupResources) + } + return result, nil +} + +// DeferredDiscoveryRESTMapper is a RESTMapper that will defer +// initialization of the RESTMapper until the first mapping is +// requested. +type DeferredDiscoveryRESTMapper struct { + initMu sync.Mutex + delegate meta.RESTMapper + cl discovery.CachedDiscoveryInterface +} + +// NewDeferredDiscoveryRESTMapper returns a +// DeferredDiscoveryRESTMapper that will lazily query the provided +// client for discovery information to do REST mappings. +func NewDeferredDiscoveryRESTMapper(cl discovery.CachedDiscoveryInterface) *DeferredDiscoveryRESTMapper { + return &DeferredDiscoveryRESTMapper{ + cl: cl, + } +} + +func (d *DeferredDiscoveryRESTMapper) getDelegate() (meta.RESTMapper, error) { + d.initMu.Lock() + defer d.initMu.Unlock() + + if d.delegate != nil { + return d.delegate, nil + } + + groupResources, err := GetAPIGroupResources(d.cl) + if err != nil { + return nil, err + } + + d.delegate = NewDiscoveryRESTMapper(groupResources) + return d.delegate, err +} + +// Reset resets the internally cached Discovery information and will +// cause the next mapping request to re-discover. 
+func (d *DeferredDiscoveryRESTMapper) Reset() { + klog.V(5).Info("Invalidating discovery information") + + d.initMu.Lock() + defer d.initMu.Unlock() + + d.cl.Invalidate() + d.delegate = nil +} + +// KindFor takes a partial resource and returns back the single match. +// It returns an error if there are multiple matches. +func (d *DeferredDiscoveryRESTMapper) KindFor(resource schema.GroupVersionResource) (gvk schema.GroupVersionKind, err error) { + del, err := d.getDelegate() + if err != nil { + return schema.GroupVersionKind{}, err + } + gvk, err = del.KindFor(resource) + if err != nil && !d.cl.Fresh() { + d.Reset() + gvk, err = d.KindFor(resource) + } + return +} + +// KindsFor takes a partial resource and returns back the list of +// potential kinds in priority order. +func (d *DeferredDiscoveryRESTMapper) KindsFor(resource schema.GroupVersionResource) (gvks []schema.GroupVersionKind, err error) { + del, err := d.getDelegate() + if err != nil { + return nil, err + } + gvks, err = del.KindsFor(resource) + if len(gvks) == 0 && !d.cl.Fresh() { + d.Reset() + gvks, err = d.KindsFor(resource) + } + return +} + +// ResourceFor takes a partial resource and returns back the single +// match. It returns an error if there are multiple matches. +func (d *DeferredDiscoveryRESTMapper) ResourceFor(input schema.GroupVersionResource) (gvr schema.GroupVersionResource, err error) { + del, err := d.getDelegate() + if err != nil { + return schema.GroupVersionResource{}, err + } + gvr, err = del.ResourceFor(input) + if err != nil && !d.cl.Fresh() { + d.Reset() + gvr, err = d.ResourceFor(input) + } + return +} + +// ResourcesFor takes a partial resource and returns back the list of +// potential resource in priority order. 
+func (d *DeferredDiscoveryRESTMapper) ResourcesFor(input schema.GroupVersionResource) (gvrs []schema.GroupVersionResource, err error) { + del, err := d.getDelegate() + if err != nil { + return nil, err + } + gvrs, err = del.ResourcesFor(input) + if len(gvrs) == 0 && !d.cl.Fresh() { + d.Reset() + gvrs, err = d.ResourcesFor(input) + } + return +} + +// RESTMapping identifies a preferred resource mapping for the +// provided group kind. +func (d *DeferredDiscoveryRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (m *meta.RESTMapping, err error) { + del, err := d.getDelegate() + if err != nil { + return nil, err + } + m, err = del.RESTMapping(gk, versions...) + if err != nil && !d.cl.Fresh() { + d.Reset() + m, err = d.RESTMapping(gk, versions...) + } + return +} + +// RESTMappings returns the RESTMappings for the provided group kind +// in a rough internal preferred order. If no kind is found, it will +// return a NoResourceMatchError. +func (d *DeferredDiscoveryRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) (ms []*meta.RESTMapping, err error) { + del, err := d.getDelegate() + if err != nil { + return nil, err + } + ms, err = del.RESTMappings(gk, versions...) + if len(ms) == 0 && !d.cl.Fresh() { + d.Reset() + ms, err = d.RESTMappings(gk, versions...) + } + return +} + +// ResourceSingularizer converts a resource name from plural to +// singular (e.g., from pods to pod). 
+func (d *DeferredDiscoveryRESTMapper) ResourceSingularizer(resource string) (singular string, err error) { + del, err := d.getDelegate() + if err != nil { + return resource, err + } + singular, err = del.ResourceSingularizer(resource) + if err != nil && !d.cl.Fresh() { + d.Reset() + singular, err = d.ResourceSingularizer(resource) + } + return +} + +func (d *DeferredDiscoveryRESTMapper) String() string { + del, err := d.getDelegate() + if err != nil { + return fmt.Sprintf("DeferredDiscoveryRESTMapper{%v}", err) + } + return fmt.Sprintf("DeferredDiscoveryRESTMapper{\n\t%v\n}", del) +} + +// Make sure it satisfies the interface +var _ meta.RESTMapper = &DeferredDiscoveryRESTMapper{} diff --git a/vendor/k8s.io/client-go/restmapper/shortcut.go b/vendor/k8s.io/client-go/restmapper/shortcut.go new file mode 100644 index 0000000000..6f3c9d9306 --- /dev/null +++ b/vendor/k8s.io/client-go/restmapper/shortcut.go @@ -0,0 +1,172 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restmapper + +import ( + "strings" + + "k8s.io/klog" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" +) + +// shortcutExpander is a RESTMapper that can be used for Kubernetes resources. 
It expands the resource first, then invokes the wrapped +type shortcutExpander struct { + RESTMapper meta.RESTMapper + + discoveryClient discovery.DiscoveryInterface +} + +var _ meta.RESTMapper = &shortcutExpander{} + +// NewShortcutExpander wraps a restmapper in a layer that expands shortcuts found via discovery +func NewShortcutExpander(delegate meta.RESTMapper, client discovery.DiscoveryInterface) meta.RESTMapper { + return shortcutExpander{RESTMapper: delegate, discoveryClient: client} +} + +// KindFor fulfills meta.RESTMapper +func (e shortcutExpander) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + return e.RESTMapper.KindFor(e.expandResourceShortcut(resource)) +} + +// KindsFor fulfills meta.RESTMapper +func (e shortcutExpander) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + return e.RESTMapper.KindsFor(e.expandResourceShortcut(resource)) +} + +// ResourcesFor fulfills meta.RESTMapper +func (e shortcutExpander) ResourcesFor(resource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + return e.RESTMapper.ResourcesFor(e.expandResourceShortcut(resource)) +} + +// ResourceFor fulfills meta.RESTMapper +func (e shortcutExpander) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) { + return e.RESTMapper.ResourceFor(e.expandResourceShortcut(resource)) +} + +// ResourceSingularizer fulfills meta.RESTMapper +func (e shortcutExpander) ResourceSingularizer(resource string) (string, error) { + return e.RESTMapper.ResourceSingularizer(e.expandResourceShortcut(schema.GroupVersionResource{Resource: resource}).Resource) +} + +// RESTMapping fulfills meta.RESTMapper +func (e shortcutExpander) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + return e.RESTMapper.RESTMapping(gk, versions...) 
+} + +// RESTMappings fulfills meta.RESTMapper +func (e shortcutExpander) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + return e.RESTMapper.RESTMappings(gk, versions...) +} + +// getShortcutMappings returns a set of tuples which holds short names for resources. +// First the list of potential resources will be taken from the API server. +// Next we will append the hardcoded list of resources - to be backward compatible with old servers. +// NOTE that the list is ordered by group priority. +func (e shortcutExpander) getShortcutMappings() ([]*metav1.APIResourceList, []resourceShortcuts, error) { + res := []resourceShortcuts{} + // get server resources + // This can return an error *and* the results it was able to find. We don't need to fail on the error. + apiResList, err := e.discoveryClient.ServerResources() + if err != nil { + klog.V(1).Infof("Error loading discovery information: %v", err) + } + for _, apiResources := range apiResList { + gv, err := schema.ParseGroupVersion(apiResources.GroupVersion) + if err != nil { + klog.V(1).Infof("Unable to parse groupversion = %s due to = %s", apiResources.GroupVersion, err.Error()) + continue + } + for _, apiRes := range apiResources.APIResources { + for _, shortName := range apiRes.ShortNames { + rs := resourceShortcuts{ + ShortForm: schema.GroupResource{Group: gv.Group, Resource: shortName}, + LongForm: schema.GroupResource{Group: gv.Group, Resource: apiRes.Name}, + } + res = append(res, rs) + } + } + } + + return apiResList, res, nil +} + +// expandResourceShortcut will return the expanded version of resource +// (something that a pkg/api/meta.RESTMapper can understand), if it is +// indeed a shortcut. If no match has been found, we will match on group prefixing. +// Lastly we will return resource unmodified. 
+func (e shortcutExpander) expandResourceShortcut(resource schema.GroupVersionResource) schema.GroupVersionResource { + // get the shortcut mappings and return on first match. + if allResources, shortcutResources, err := e.getShortcutMappings(); err == nil { + // avoid expanding if there's an exact match to a full resource name + for _, apiResources := range allResources { + gv, err := schema.ParseGroupVersion(apiResources.GroupVersion) + if err != nil { + continue + } + if len(resource.Group) != 0 && resource.Group != gv.Group { + continue + } + for _, apiRes := range apiResources.APIResources { + if resource.Resource == apiRes.Name { + return resource + } + if resource.Resource == apiRes.SingularName { + return resource + } + } + } + + for _, item := range shortcutResources { + if len(resource.Group) != 0 && resource.Group != item.ShortForm.Group { + continue + } + if resource.Resource == item.ShortForm.Resource { + resource.Resource = item.LongForm.Resource + resource.Group = item.LongForm.Group + return resource + } + } + + // we didn't find exact match so match on group prefixing. This allows autoscal to match autoscaling + if len(resource.Group) == 0 { + return resource + } + for _, item := range shortcutResources { + if !strings.HasPrefix(item.ShortForm.Group, resource.Group) { + continue + } + if resource.Resource == item.ShortForm.Resource { + resource.Resource = item.LongForm.Resource + resource.Group = item.LongForm.Group + return resource + } + } + } + + return resource +} + +// ResourceShortcuts represents a structure that holds the information how to +// transition from resource's shortcut to its full name. 
+type resourceShortcuts struct { + ShortForm schema.GroupResource + LongForm schema.GroupResource +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go index dea229c918..a7b8c1c6e4 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go @@ -229,11 +229,12 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI if len(configAuthInfo.Token) > 0 { mergedConfig.BearerToken = configAuthInfo.Token } else if len(configAuthInfo.TokenFile) > 0 { - ts := restclient.NewCachedFileTokenSource(configAuthInfo.TokenFile) - if _, err := ts.Token(); err != nil { + tokenBytes, err := ioutil.ReadFile(configAuthInfo.TokenFile) + if err != nil { return nil, err } - mergedConfig.WrapTransport = restclient.TokenSourceWrapTransport(ts) + mergedConfig.BearerToken = string(tokenBytes) + mergedConfig.BearerTokenFile = configAuthInfo.TokenFile } if len(configAuthInfo.Impersonate) > 0 { mergedConfig.Impersonate = restclient.ImpersonationConfig{ diff --git a/vendor/k8s.io/client-go/transport/config.go b/vendor/k8s.io/client-go/transport/config.go index 4081c23e7f..acb126d8b0 100644 --- a/vendor/k8s.io/client-go/transport/config.go +++ b/vendor/k8s.io/client-go/transport/config.go @@ -39,6 +39,11 @@ type Config struct { // Bearer token for authentication BearerToken string + // Path to a file containing a BearerToken. + // If set, the contents are periodically read. + // The last successfully read value takes precedence over BearerToken. + BearerTokenFile string + // Impersonate is the config that this Config will impersonate using Impersonate ImpersonationConfig @@ -80,7 +85,7 @@ func (c *Config) HasBasicAuth() bool { // HasTokenAuth returns whether the configuration has token authentication or not. 
func (c *Config) HasTokenAuth() bool { - return len(c.BearerToken) != 0 + return len(c.BearerToken) != 0 || len(c.BearerTokenFile) != 0 } // HasCertAuth returns whether the configuration has certificate authentication or not. diff --git a/vendor/k8s.io/client-go/transport/round_trippers.go b/vendor/k8s.io/client-go/transport/round_trippers.go index da417cf96e..117a9c8c4d 100644 --- a/vendor/k8s.io/client-go/transport/round_trippers.go +++ b/vendor/k8s.io/client-go/transport/round_trippers.go @@ -22,6 +22,7 @@ import ( "strings" "time" + "golang.org/x/oauth2" "k8s.io/klog" utilnet "k8s.io/apimachinery/pkg/util/net" @@ -44,7 +45,11 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip case config.HasBasicAuth() && config.HasTokenAuth(): return nil, fmt.Errorf("username/password or bearer token may be set, but not both") case config.HasTokenAuth(): - rt = NewBearerAuthRoundTripper(config.BearerToken, rt) + var err error + rt, err = NewBearerAuthWithRefreshRoundTripper(config.BearerToken, config.BearerTokenFile, rt) + if err != nil { + return nil, err + } case config.HasBasicAuth(): rt = NewBasicAuthRoundTripper(config.Username, config.Password, rt) } @@ -265,13 +270,35 @@ func (rt *impersonatingRoundTripper) WrappedRoundTripper() http.RoundTripper { r type bearerAuthRoundTripper struct { bearer string + source oauth2.TokenSource rt http.RoundTripper } // NewBearerAuthRoundTripper adds the provided bearer token to a request // unless the authorization header has already been set. func NewBearerAuthRoundTripper(bearer string, rt http.RoundTripper) http.RoundTripper { - return &bearerAuthRoundTripper{bearer, rt} + return &bearerAuthRoundTripper{bearer, nil, rt} +} + +// NewBearerAuthRoundTripper adds the provided bearer token to a request +// unless the authorization header has already been set. +// If tokenFile is non-empty, it is periodically read, +// and the last successfully read content is used as the bearer token. 
+// If tokenFile is non-empty and bearer is empty, the tokenFile is read +// immediately to populate the initial bearer token. +func NewBearerAuthWithRefreshRoundTripper(bearer string, tokenFile string, rt http.RoundTripper) (http.RoundTripper, error) { + if len(tokenFile) == 0 { + return &bearerAuthRoundTripper{bearer, nil, rt}, nil + } + source := NewCachedFileTokenSource(tokenFile) + if len(bearer) == 0 { + token, err := source.Token() + if err != nil { + return nil, err + } + bearer = token.AccessToken + } + return &bearerAuthRoundTripper{bearer, source, rt}, nil } func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { @@ -280,7 +307,13 @@ func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, } req = utilnet.CloneRequest(req) - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", rt.bearer)) + token := rt.bearer + if rt.source != nil { + if refreshedToken, err := rt.source.Token(); err == nil { + token = refreshedToken.AccessToken + } + } + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) return rt.rt.RoundTrip(req) } diff --git a/vendor/k8s.io/client-go/rest/token_source.go b/vendor/k8s.io/client-go/transport/token_source.go similarity index 87% rename from vendor/k8s.io/client-go/rest/token_source.go rename to vendor/k8s.io/client-go/transport/token_source.go index c251b5eb0b..8595df2716 100644 --- a/vendor/k8s.io/client-go/rest/token_source.go +++ b/vendor/k8s.io/client-go/transport/token_source.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package rest +package transport import ( "fmt" @@ -47,14 +47,14 @@ func TokenSourceWrapTransport(ts oauth2.TokenSource) func(http.RoundTripper) htt func NewCachedFileTokenSource(path string) oauth2.TokenSource { return &cachingTokenSource{ now: time.Now, - leeway: 1 * time.Minute, + leeway: 10 * time.Second, base: &fileTokenSource{ path: path, - // This period was picked because it is half of the minimum validity - // duration for a token provisioned by they TokenRequest API. This is - // unsophisticated and should induce rotation at a frequency that should - // work with the token volume source. - period: 5 * time.Minute, + // This period was picked because it is half of the duration between when the kubelet + // refreshes a projected service account token and when the original token expires. + // Default token lifetime is 10 minutes, and the kubelet starts refreshing at 80% of lifetime. + // This should induce re-reading at a frequency that works with the token volume source. + period: time.Minute, }, } } diff --git a/vendor/k8s.io/kubernetes/CHANGELOG-1.13.md b/vendor/k8s.io/kubernetes/CHANGELOG-1.13.md index 433fc1bcda..a22d8d73ee 100644 --- a/vendor/k8s.io/kubernetes/CHANGELOG-1.13.md +++ b/vendor/k8s.io/kubernetes/CHANGELOG-1.13.md @@ -1,53 +1,838 @@ -- [v1.13.0-rc.1](#v1130-rc1) - - [Downloads for v1.13.0-rc.1](#downloads-for-v1130-rc1) +- [v1.13.2](#v1132) + - [Downloads for v1.13.2](#downloads-for-v1132) - [Client Binaries](#client-binaries) - [Server Binaries](#server-binaries) - [Node Binaries](#node-binaries) - - [Changelog since v1.13.0-beta.2](#changelog-since-v1130-beta2) + - [Changelog since v1.13.1](#changelog-since-v1131) - [Other notable changes](#other-notable-changes) -- [v1.13.0-beta.2](#v1130-beta2) - - [Downloads for v1.13.0-beta.2](#downloads-for-v1130-beta2) +- [v1.13.1](#v1131) + - [Downloads for v1.13.1](#downloads-for-v1131) - [Client Binaries](#client-binaries-1) - [Server Binaries](#server-binaries-1) - [Node 
Binaries](#node-binaries-1) - - [Changelog since v1.13.0-beta.1](#changelog-since-v1130-beta1) + - [Changelog since v1.13.0](#changelog-since-v1130) - [Other notable changes](#other-notable-changes-1) -- [v1.13.0-beta.1](#v1130-beta1) - - [Downloads for v1.13.0-beta.1](#downloads-for-v1130-beta1) +- [v1.13.0](#v1130) + - [Downloads for v1.13.0](#downloads-for-v1130) - [Client Binaries](#client-binaries-2) - [Server Binaries](#server-binaries-2) - [Node Binaries](#node-binaries-2) +- [Kubernetes 1.13 Release Notes](#kubernetes-113-release-notes) + - [Security Content](#security-content) + - [Urgent Upgrade Notes](#urgent-upgrade-notes) + - [(No, really, you MUST do this before you upgrade)](#no-really-you-must-do-this-before-you-upgrade) + - [Known Issues](#known-issues) + - [Deprecations](#deprecations) + - [Major Themes](#major-themes) + - [SIG API Machinery](#sig-api-machinery) + - [SIG Auth](#sig-auth) + - [SIG AWS](#sig-aws) + - [SIG Azure](#sig-azure) + - [SIG Big Data](#sig-big-data) + - [SIG CLI](#sig-cli) + - [SIG Cloud Provider](#sig-cloud-provider) + - [SIG Cluster Lifecycle](#sig-cluster-lifecycle) + - [SIG IBM Cloud](#sig-ibm-cloud) + - [SIG Multicluster](#sig-multicluster) + - [SIG Network](#sig-network) + - [SIG Node](#sig-node) + - [SIG Openstack](#sig-openstack) + - [SIG Scalability](#sig-scalability) + - [SIG Scheduling](#sig-scheduling) + - [SIG Service Catalog](#sig-service-catalog) + - [SIG Storage](#sig-storage) + - [SIG UI](#sig-ui) + - [SIG VMWare](#sig-vmware) + - [SIG Windows](#sig-windows) + - [New Features](#new-features) + - [Release Notes From SIGs](#release-notes-from-sigs) + - [SIG API Machinery](#sig-api-machinery-1) + - [SIG Auth](#sig-auth-1) + - [SIG Autoscaling](#sig-autoscaling) + - [SIG AWS](#sig-aws-1) + - [SIG Azure](#sig-azure-1) + - [SIG CLI](#sig-cli-1) + - [SIG Cloud Provider](#sig-cloud-provider-1) + - [SIG Cluster Lifecycle](#sig-cluster-lifecycle-1) + - [SIG GCP](#sig-gcp) + - [SIG Network](#sig-network-1) + - [SIG 
Node](#sig-node-1) + - [SIG OpenStack](#sig-openstack-1) + - [SIG Release](#sig-release) + - [SIG Scheduling](#sig-scheduling-1) + - [SIG Storage](#sig-storage-1) + - [SIG Windows](#sig-windows-1) + - [External Dependencies](#external-dependencies) +- [v1.13.0-rc.2](#v1130-rc2) + - [Downloads for v1.13.0-rc.2](#downloads-for-v1130-rc2) + - [Client Binaries](#client-binaries-3) + - [Server Binaries](#server-binaries-3) + - [Node Binaries](#node-binaries-3) + - [Changelog since v1.13.0-rc.1](#changelog-since-v1130-rc1) + - [Other notable changes](#other-notable-changes-2) +- [v1.13.0-rc.1](#v1130-rc1) + - [Downloads for v1.13.0-rc.1](#downloads-for-v1130-rc1) + - [Client Binaries](#client-binaries-4) + - [Server Binaries](#server-binaries-4) + - [Node Binaries](#node-binaries-4) + - [Changelog since v1.13.0-beta.2](#changelog-since-v1130-beta2) + - [Other notable changes](#other-notable-changes-3) +- [v1.13.0-beta.2](#v1130-beta2) + - [Downloads for v1.13.0-beta.2](#downloads-for-v1130-beta2) + - [Client Binaries](#client-binaries-5) + - [Server Binaries](#server-binaries-5) + - [Node Binaries](#node-binaries-5) + - [Changelog since v1.13.0-beta.1](#changelog-since-v1130-beta1) + - [Other notable changes](#other-notable-changes-4) +- [v1.13.0-beta.1](#v1130-beta1) + - [Downloads for v1.13.0-beta.1](#downloads-for-v1130-beta1) + - [Client Binaries](#client-binaries-6) + - [Server Binaries](#server-binaries-6) + - [Node Binaries](#node-binaries-6) - [Changelog since v1.13.0-alpha.3](#changelog-since-v1130-alpha3) - [Action Required](#action-required) - - [Other notable changes](#other-notable-changes-2) + - [Other notable changes](#other-notable-changes-5) - [v1.13.0-alpha.3](#v1130-alpha3) - [Downloads for v1.13.0-alpha.3](#downloads-for-v1130-alpha3) - - [Client Binaries](#client-binaries-3) - - [Server Binaries](#server-binaries-3) - - [Node Binaries](#node-binaries-3) + - [Client Binaries](#client-binaries-7) + - [Server Binaries](#server-binaries-7) + - [Node 
Binaries](#node-binaries-7) - [Changelog since v1.13.0-alpha.2](#changelog-since-v1130-alpha2) - - [Other notable changes](#other-notable-changes-3) + - [Other notable changes](#other-notable-changes-6) - [v1.13.0-alpha.2](#v1130-alpha2) - [Downloads for v1.13.0-alpha.2](#downloads-for-v1130-alpha2) - - [Client Binaries](#client-binaries-4) - - [Server Binaries](#server-binaries-4) - - [Node Binaries](#node-binaries-4) + - [Client Binaries](#client-binaries-8) + - [Server Binaries](#server-binaries-8) + - [Node Binaries](#node-binaries-8) - [Changelog since v1.13.0-alpha.1](#changelog-since-v1130-alpha1) - - [Other notable changes](#other-notable-changes-4) + - [Other notable changes](#other-notable-changes-7) - [v1.13.0-alpha.1](#v1130-alpha1) - [Downloads for v1.13.0-alpha.1](#downloads-for-v1130-alpha1) - - [Client Binaries](#client-binaries-5) - - [Server Binaries](#server-binaries-5) - - [Node Binaries](#node-binaries-5) + - [Client Binaries](#client-binaries-9) + - [Server Binaries](#server-binaries-9) + - [Node Binaries](#node-binaries-9) - [Changelog since v1.12.0](#changelog-since-v1120) - [Action Required](#action-required-1) - - [Other notable changes](#other-notable-changes-5) + - [Other notable changes](#other-notable-changes-8) +# v1.13.2 + +[Documentation](https://docs.k8s.io) + +## Downloads for v1.13.2 + + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.13.2/kubernetes.tar.gz) | `fe1c30efaffb70b4102879580470031baf78f11c94fc37773bd69568a3aca9a93a0350d067faa2fa0f25f3e85005fe5944cecd7a33d48326f55c60d0c0408004` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.13.2/kubernetes-src.tar.gz) | `a6fb14fef46a566a68847cbb522ea091c545293f16af7ddf9ab26a801e548debcd4e4dc48aa6e38cc92bd65f69ca78e7db27d137e915efe687d3228962b94ecb` + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.13.2/kubernetes-client-darwin-386.tar.gz) | 
`9998b7286281018f9bb1d0eeac9d59f287c2f4240f55ff362a9ce2d01565d60931793715904ef76cc82004556db4dedf18869e56022f054518b09b8b9fe6bfcf` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.13.2/kubernetes-client-darwin-amd64.tar.gz) | `4b1016ed9194d6c1e96a5aa896426be3288cf2f5c98cad9383b3760247c52bcdddbdbe4c697196e31cd0491469eb55ee6cd1ec388f82854b88eb68e4e1ece4e1` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.13.2/kubernetes-client-linux-386.tar.gz) | `a48aba3f68a77a65d44121a0fc8ae6b508c71e483f19b05faea924f60cc18c7443c218b7c5170ba958dadb1411ce3aa9517782f3d0a68b5a704a33cb59d76212` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.2/kubernetes-client-linux-amd64.tar.gz) | `1c389c36b531349e745bd036b6f33224a116fc4b6fbaff86d96a15dbae436730848263bb83f93fcf63a0e6ceaabd729817bdd1606fe180f57106da660c3c36f0` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.13.2/kubernetes-client-linux-arm.tar.gz) | `87e958e7a9436d4db2264a21c96a113d2e88d485bbb5ea9e73fbeff39dbb3d0f3678cc5a491b72cde19a8564c17717f0bbcc31ead8197cfb0524e15e33820723` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.2/kubernetes-client-linux-arm64.tar.gz) | `9ad4f639a95a2211594f125db38f67528ff3c38d6ae32b1cee3b7102bb475d9724ec232acf13f1ee5cbb9e22c58a8cfc09546df4bb2ab7fbde76195cd7f21cdc` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.2/kubernetes-client-linux-ppc64le.tar.gz) | `25223a041dca0d13f8f26edae7d559c21c481c42b78a131614db71a8bee2fb85b81857eb34979a42d061e089e2673575d78ba3986b0491a1a844c9a7c74d145a` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.2/kubernetes-client-linux-s390x.tar.gz) | `62473b3798f01f0c1776bea6c2877d68e0d0f7221eceabb9108aa2f6060343bd46eba6b9564ae0d6a03fafaef3f32bc7d8c5da5ea970779fa0843395c55056cc` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.13.2/kubernetes-client-windows-386.tar.gz) | 
`6e43661690067229691df9f62788f23e1af40e23136e1e6c2c316c88a705b1554560c1489787fd34c594f81409a3814d3f7ff598c4248a7d1919db12f5d78fcf` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.13.2/kubernetes-client-windows-amd64.tar.gz) | `4bdf3074f3f50fd794d7ccf16c076d412686a1e3c435c9970d3e84da2b44c176c5f17f787e0e08bc30ac33a02ee1f3ca13b7fa4bbd2a07454d8a40c5d500b018` + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.2/kubernetes-server-linux-amd64.tar.gz) | `e2d2f02f76578c5a1c04a9a417e9f1dc16abcc28daab688c50554a1bf64bb75dfbf5aae6d6f3e5c4463b214034e0220884727e314fa563f6e2199b9a3e4147a9` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.13.2/kubernetes-server-linux-arm.tar.gz) | `6f4a331fd78157866238dd9b8d986d33bfce6548ae503d0c9f4fb42854b0793df659298f95577103994047d01a6b613140b0eece3ae8c294bb0fe8ace84cdea1` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.2/kubernetes-server-linux-arm64.tar.gz) | `50fc95da2598775029b828f31978f6e2a98eceee890bd575122e7697dc7805f1fb9de060998999f3f9d8bc76f5d534b25f64f9b340c70a58503172ae2818d98a` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.2/kubernetes-server-linux-ppc64le.tar.gz) | `eea26a444b99d5fe984efa12672125f4482ab6f0e5cd52b5d110669bc0f1ff1239293cdca5af4ac862507e1c7a2ffdb890a0ff6dc13a6c971a40809370986d4e` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.2/kubernetes-server-linux-s390x.tar.gz) | `d2277b94c0f487eb8636355e8d0880af9f7a7e4fd5e6688278814582a0d872aa4884b2411d8a2ff5ea1b917ec178b973edb8af4fbb55c79a0f5c902dcfd43fbf` + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.2/kubernetes-node-linux-amd64.tar.gz) | `f908a95f264792bdc0d8c79c818d044b81bf5cf7179b239a3c3835a58d1e61e5b31df913bb6cfeb662864102b9f51951565e54f419fa2d56859041b7c68de82a` 
+[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.13.2/kubernetes-node-linux-arm.tar.gz) | `24d5bedf4da1aa7d445b8ff2e62f43210f6bd5cc4a3a53b157e5edfe1e00c71b25d84767c8738c9c4f6fdfc86e56de4c6225eabaea4ca8385da0856daef6d66d` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.2/kubernetes-node-linux-arm64.tar.gz) | `52936dfe3c41207495f5831c9ec93be2e188ee97d696de3a0317ed8caefab2b002c3b1b72c375bd11368a6a8ac7112b0451190d54e1b234d2b5e45a30ea8eddc` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.2/kubernetes-node-linux-ppc64le.tar.gz) | `c07bee8d4511eadd0fb45107b672898c53fc817f5de034c57f4ef0fbeef0a8964f188604b3fc0ce042e511444e3a2117574136a4a9b4e013ac316e6d72d8fcd5` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.2/kubernetes-node-linux-s390x.tar.gz) | `12b588e226bb7b7dc78378962d46553e540539c666c364bdfc0f1228a87bca90d54ba42dfd7558369f4c986c6460fc5bc98244b2274cb8801997c9f1f4ce95f0` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.13.2/kubernetes-node-windows-amd64.tar.gz) | `219fe75c216a0d42b944699e8da9f5b4eeff3cc43415d139954c577b75a27b108dc787e7b459cdc0251095367edefab2afc8d5297f33fc44cd222840968b3a08` + +## Changelog since v1.13.1 + +### Other notable changes + +* client-go: shortens refresh period for token files to 1 minute to ensure auto-rotated projected service account tokens are read frequently enough. ([#72437](https://github.com/kubernetes/kubernetes/pull/72437), [@liggitt](https://github.com/liggitt)) +* Updates the kubernetes dashboard add-on to v1.10.1. Skipping dashboard login is no longer enabled by default. ([#72495](https://github.com/kubernetes/kubernetes/pull/72495), [@liggitt](https://github.com/liggitt)) +* Fixes a bug in HPA controller so HPAs are always updated every resyncPeriod (15 seconds). 
([#72373](https://github.com/kubernetes/kubernetes/pull/72373), [@krzysztof-jastrzebski](https://github.com/krzysztof-jastrzebski)) +* Fix device mountable volume names in DSW to prevent races in device mountable plugin, e.g. local. ([#71509](https://github.com/kubernetes/kubernetes/pull/71509), [@cofyc](https://github.com/cofyc)) +* change azure disk host cache to ReadOnly by default ([#72229](https://github.com/kubernetes/kubernetes/pull/72229), [@andyzhangx](https://github.com/andyzhangx)) +* Fixes issue with cleaning up stale NFS subpath mounts ([#71804](https://github.com/kubernetes/kubernetes/pull/71804), [@msau42](https://github.com/msau42)) +* Fix a race condition in the scheduler preemption logic that could cause nominatedNodeName of a pod not to be considered in one or more scheduling cycles. ([#72259](https://github.com/kubernetes/kubernetes/pull/72259), [@bsalamat](https://github.com/bsalamat)) +* Fixes `kubectl create secret docker-registry` compatibility ([#72344](https://github.com/kubernetes/kubernetes/pull/72344), [@liggitt](https://github.com/liggitt)) +* Fix race condition introduced by graceful termination which can lead to a deadlock in kube-proxy ([#72361](https://github.com/kubernetes/kubernetes/pull/72361), [@lbernail](https://github.com/lbernail)) +* Support graceful termination with IPVS when deleting a service ([#71895](https://github.com/kubernetes/kubernetes/pull/71895), [@lbernail](https://github.com/lbernail)) +* Fixes issue where subpath volume content was deleted during orphaned pod cleanup for Local volumes that are directories (and not mount points) on the root filesystem. ([#72291](https://github.com/kubernetes/kubernetes/pull/72291), [@msau42](https://github.com/msau42)) +* kube-proxy in IPVS mode will stop initiating connections to terminating pods for services with sessionAffinity set. 
([#71834](https://github.com/kubernetes/kubernetes/pull/71834), [@lbernail](https://github.com/lbernail)) +* fix race condition when attach azure disk in vmss ([#71992](https://github.com/kubernetes/kubernetes/pull/71992), [@andyzhangx](https://github.com/andyzhangx)) +* Reduce CSI log and event spam. ([#71581](https://github.com/kubernetes/kubernetes/pull/71581), [@saad-ali](https://github.com/saad-ali)) +* fix kubelet log flushing issue in azure disk ([#71990](https://github.com/kubernetes/kubernetes/pull/71990), [@andyzhangx](https://github.com/andyzhangx)) +* Update to use go1.11.3 with fix for CVE-2018-16875 ([#72035](https://github.com/kubernetes/kubernetes/pull/72035), [@seemethere](https://github.com/seemethere)) +* Fix a race condition in which kubeadm only waits for the kubelets kubeconfig file when it has performed the TLS bootstrap, but wasn't waiting for certificates to be present in the filesystem ([#72030](https://github.com/kubernetes/kubernetes/pull/72030), [@ereslibre](https://github.com/ereslibre)) +* kubeadm: fix a possible panic when joining a new control plane node in HA scenarios ([#72123](https://github.com/kubernetes/kubernetes/pull/72123), [@anitgandhi](https://github.com/anitgandhi)) +* kubeadm: fix a bug when syncing etcd endpoints ([#71945](https://github.com/kubernetes/kubernetes/pull/71945), [@pytimer](https://github.com/pytimer)) + + + +# v1.13.1 + +[Documentation](https://docs.k8s.io) + +## Downloads for v1.13.1 + + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes.tar.gz) | `de3858357b2b4444bccc0599c7d0edd3e6ec1a80267ef96883ebcfb06c518ce467dd8720b48084644677a42b8e3ffad9a7d4745b40170ce9dfe5b43310979be1` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-src.tar.gz) | 
`7f0a8dbd3c7397cc5a5bc0297eb24b8e734c3c7b78e48fc794c525377c3895f4fd84fd0a2fa70c5513cc47ee5a174c22bab54796abc5a8f2b30687642c819a68` + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-client-darwin-386.tar.gz) | `371028dba7a28ec3c8f10b861448cb1574dce25d32d847af254b76b7f158aa4fcda695972e2a08440faa4e16077f8021b07115d0da897bef79c33e702f3be95e` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-client-darwin-amd64.tar.gz) | `6aa7025308e9fb1eb4415e504e8aa9c7a0a20b09c500cb48df82bbd04443101664b2614fb284875b9670d4bb11e8f1a10190eaf1d54f81f3a9526053958b0802` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-client-linux-386.tar.gz) | `6453670bb61b4f5f7fe8ae78804864ecd52682b32592f6956faf3d2220884a64fb22ae2e668b63f28ea8fd354c50aa90ce61c60be327fb0b5fcfe2c7835ef559` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-client-linux-amd64.tar.gz) | `ca00442f50b5d5627357dce97c90c17cb0126d746b887afdab2d4db9e0826532469fd1ee62f40eb6923761618f46752d10993578ca19c8b92c3a2aeb5102a318` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-client-linux-arm.tar.gz) | `5fa170cbe56b8f5d103f520e2493f911c5eb59b51a6afdbaa9c08196943f1235e533f0384ce7c01c73a020c6889cf8f03cc3642912d0953c74d1098e4b21f3a0` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-client-linux-arm64.tar.gz) | `710343ad067f0d642c43cd26871828275645b08b4f4c86bd555865318d8fe08b7f0a720174c04d58acffcb26faf563636dc13eef66a2813eac68bb8b994908f4` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-client-linux-ppc64le.tar.gz) | `0fa7ab255f0cba3adc754337c6184e6ec464aa5a4d6dd4d38aad8a0e2430a0044f4ed1ffcd7cc7c863190d3cda6b84abd12ca7536139d665ad61fe7704e63d30` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-client-linux-s390x.tar.gz) | 
`749a8dce5b81e2edbd315841acac64a0e5d17bb1ead8173560b6a4ccc28604bc8254051297ab51cb5df845495bd75a45137827b3386e3962295fec8601563eaa` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-client-windows-386.tar.gz) | `cd4732fbe569009c426f963318d05ddcc7c63dc27ec9d2bf9c60d716195e3676aa5b0e6ccbde6298f621450d365d41a910ce3ced89bf2ae6d3e81ee2fed0bb16` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-client-windows-amd64.tar.gz) | `40f5b5d221b3a611511690d316539dc8fb3f4513e4f9eb141bffa17c9ddeee875a462f5bd45e62ce7c7535310fc3e48e3441614700ee9877584c5948ddbef19f` + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-server-linux-amd64.tar.gz) | `e0e48825c5fe33a3f82b1b74847d9bfb8c5716c4313c5e4e6f46be0580e20a1e396a669b8ca446cfa581e3eb75698813249bbfcfc79c8a90793880eb5c177921` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-server-linux-arm.tar.gz) | `7ff4856e7959cf14eba0e1ab274c0bf0d3193391e7034a936697f0c4813e81d8dda4a019d3185677bee9d1345a6433db3fd6e55f644a0f73d076e0b2014ed172` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-server-linux-arm64.tar.gz) | `b8c2356002e675bd3de5ee9c2337a12e2a1bbfa2478f8e3b91065a578dfa8d50f596fd606d9f0232b06b8263867a7ca5cc7c04150718b8e40b49ae7d46001c30` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-server-linux-ppc64le.tar.gz) | `5d3a15b1241d849d8954894aa7f3fb12606f9966f73fc36aa15152038fc385153b0f0e967cc0bf410a5d5894d0269e54eac581d8e79003904d7bc29b33e98684` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-server-linux-s390x.tar.gz) | `78a9cccaf9d737b519db0866c2e80c472c7136bc723910d08649ece1c420ae7f6e56e610d65c436c56ccef8360c4da0f70e75d0cf47c0c8e739f5138cdc7b0d2` + +### Node Binaries + +filename | sha512 hash +-------- | ----------- 
+[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-node-linux-amd64.tar.gz) | `3a7881a52885bebe5958f02dc54194cc8c330576b7cf5935189df4f0b754b958917b104e1d3358c0bc9277f13a8eef2176284548d664f27a36baa389fbcc7bea` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-node-linux-arm.tar.gz) | `d0bfcff3ef7c0aa36005e7b111685438ebd0ea61d48dc68a7bd06eea3782b6eb224f9b651d80c955afa162f766c8b682976db43238562c293d6552cdadf9e934` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-node-linux-arm64.tar.gz) | `2e23bd00661aceb30fa37e24ab71315755bd93dfcc5ff361d78445a8e9ff99e7b3a56641112af3184e8b107545fba6573a6368a82bd0ce475c81cb53fd44da3b` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-node-linux-ppc64le.tar.gz) | `8d0fdb743c700d662886636fe67b52202cf9e6e57c2d7de5961b8189d8c03c91fda1d68c47033286efcc582e78be40846e2b1f5c589a0b94794fa2ce3c1ebfee` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-node-linux-s390x.tar.gz) | `70445038b4db62c3fc99540f5ddbb881387018244242f182332b8eaa7159ce1aa8929145010ab2befd4e101d39c24c61e430928235434c7d7eb54f113860a83a` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.13.1/kubernetes-node-windows-amd64.tar.gz) | `a87ad43f5a6b8f66d1bbd64f9c91e8bcbdf4adc8de0ec3cd559adaa8c14a6fe078ffdf090e52627c0522b79209fcc37bf822b323895dd47b18c20026cb25e9f5` + +## Changelog since v1.13.0 + +### Other notable changes + +* Fix overlapping filenames in diff if multiple resources have the same name. 
([#71923](https://github.com/kubernetes/kubernetes/pull/71923), [@apelisse](https://github.com/apelisse)) +* Disable proxy to loopback and linklocal ([#71980](https://github.com/kubernetes/kubernetes/pull/71980), [@micahhausler](https://github.com/micahhausler)) +* kube-scheduler: restores ability to run without authentication configuration lookup permissions ([#71755](https://github.com/kubernetes/kubernetes/pull/71755), [@liggitt](https://github.com/liggitt)) +* client-go: restores behavior of populating the BearerToken field in rest.Config objects constructed from kubeconfig files containing tokenFile config, or from in-cluster configuration. An additional BearerTokenFile field is now populated to enable constructed clients to periodically refresh tokens. ([#71713](https://github.com/kubernetes/kubernetes/pull/71713), [@liggitt](https://github.com/liggitt)) +* apply: fix detection of non-dry-run enabled servers ([#71854](https://github.com/kubernetes/kubernetes/pull/71854), [@apelisse](https://github.com/apelisse)) +* Scheduler only activates unschedulable pods if node's scheduling related properties change. ([#71551](https://github.com/kubernetes/kubernetes/pull/71551), [@mlmhl](https://github.com/mlmhl)) +* Fixes pod deletion when cleaning old cronjobs ([#71802](https://github.com/kubernetes/kubernetes/pull/71802), [@soltysh](https://github.com/soltysh)) +* fix issue: vm sku restriction policy does not work in azure disk attach/detach ([#71941](https://github.com/kubernetes/kubernetes/pull/71941), [@andyzhangx](https://github.com/andyzhangx)) +* Include CRD for BGPConfigurations, needed for calico 2.x to 3.x upgrade. 
([#71868](https://github.com/kubernetes/kubernetes/pull/71868), [@satyasm](https://github.com/satyasm)) +* UDP connections now support graceful termination in IPVS mode ([#71515](https://github.com/kubernetes/kubernetes/pull/71515), [@lbernail](https://github.com/lbernail)) +* kubeadm: use kubeconfig flag instead of kubeconfig-dir on init phase bootstrap-token ([#71803](https://github.com/kubernetes/kubernetes/pull/71803), [@yagonobre](https://github.com/yagonobre)) +* On GCI, NPD starts to monitor kubelet, docker, containerd crashlooping, read-only filesystem and corrupt docker overlay2 issues. ([#71522](https://github.com/kubernetes/kubernetes/pull/71522), [@wangzhen127](https://github.com/wangzhen127)) +* Fixes an issue where Portworx volumes cannot be mounted if 9001 port is already in use on the host and users remap 9001 to another port. ([#70392](https://github.com/kubernetes/kubernetes/pull/70392), [@harsh-px](https://github.com/harsh-px)) +* Only use the first IP address got from instance metadata. This is because Azure CNI would set up a list of IP addresses in instance metadata, while only the first one is the Node's IP. 
([#71736](https://github.com/kubernetes/kubernetes/pull/71736), [@feiskyer](https://github.com/feiskyer)) +* kube-controller-manager: fixed issue display help for the deprecated insecure --port flag ([#71601](https://github.com/kubernetes/kubernetes/pull/71601), [@liggitt](https://github.com/liggitt)) +* Update Cluster Autoscaler version in gce manifests to 1.13.1 (https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.13.1) ([#71842](https://github.com/kubernetes/kubernetes/pull/71842), [@losipiuk](https://github.com/losipiuk)) +* kubectl: fixes regression in --sort-by behavior ([#71805](https://github.com/kubernetes/kubernetes/pull/71805), [@liggitt](https://github.com/liggitt)) +* Fixes apiserver nil pointer panics when requesting v2beta1 autoscaling object metrics ([#71744](https://github.com/kubernetes/kubernetes/pull/71744), [@yue9944882](https://github.com/yue9944882)) +* Fix scheduling starvation of pods in cluster with large number of unschedulable pods. 
([#71488](https://github.com/kubernetes/kubernetes/pull/71488), [@bsalamat](https://github.com/bsalamat)) + + + +# v1.13.0 + +[Documentation](https://docs.k8s.io) + +## Downloads for v1.13.0 + + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes.tar.gz) | `7b6a81c9f1b852b1e889c1b62281569a4b8853c79e5675b0910d941dfa7863c97f244f6d607aae3faf60bccd596dedb9d136b7fffeae199876e780904fd9f31e` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-src.tar.gz) | `844b9fbba21374dd190c8f12dd0e5b3303dd2cd7ad25f241d6f7e46f74adf6987afad021553521d4f479c19d87aa8d4d5be77ac7a6715d31a9187a5bab3b397b` + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-client-darwin-386.tar.gz) | `0c010351acb660a75122feb876c9887d46ec2cb466872dd073b7f5b26fdadd96888a350e01606f2ae43606a5a4ab2d9309441f4357cee924b19688f9b02c55dc` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-client-darwin-amd64.tar.gz) | `c2c40bd202900124f4e9458b067a1e1fc040030dc84ce9bcc6a5beb263de05892c16f3bdafb8d854e343e71f086207f390fd0b60f6e32e770c73294b053da6e4` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-client-linux-386.tar.gz) | `5f5449be103b103d72a4e2b1028ab014cf7f74781166327f2ae284e4f5ecb539f6b60f36b8f7c7be0ae43dfb30661b2672dd93a1fa7e26d6c67498672674bf12` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-client-linux-amd64.tar.gz) | `61a6cd3b1fb34507e0b762a45da09d88e34921985970a2ba594e0e5af737d94c966434b4e9f8e84fb73a0aeb5fa3e557344cd2eb902bf73c67d4b4bff33c6831` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-client-linux-arm.tar.gz) | `dd5591e2b88c347759a138c4d2436a0f5252341d0e8c9fbab16b8f151e2744cbdd0c8583555a451425bc471f11b688ce568d9245caf8a278cbac2b343fdead89` 
+[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-client-linux-arm64.tar.gz) | `894ed30261598ebf3485f3575e95f85e3c353f4d834bf9a6ea53b265427704b43fba5403fbc4d522b3f02afb08e6afaae200af1fe57996291a7c74398ec2fe17` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-client-linux-ppc64le.tar.gz) | `6c26c807fc730ea736fda75dc57ac73395ba78bb828fffeee18b385be550d8f3ba2bbc27a52a8f15bcbbe68218c7945d9fb725e6759c117422bc0a632c110670` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-client-linux-s390x.tar.gz) | `41e6e972de77c0bde22fdd779ea64e731b60f32e97e78a024f33fc3e33a3b364b7f77ece7d3c64ad85b7f8fe7c8fc6d6892098a3362d1fe01ebf3d551fe2bf37` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-client-windows-386.tar.gz) | `442229e5030452901b924a94e7a879d4085597a4f201a5b3fc5ac9806cab5830c836cfa7a33e8f1693fe2e8badc4047bf227d7fb00c537fb1fb4cb7639de5455` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-client-windows-amd64.tar.gz) | `a11a8e8e732e7292781b9cb1de6e3e41683f95fb3fefc2b1a7b5fb1f064a0d80c0833876d931675135778457d81de9ed2e81caee4b3eb27d9f23c7b722b17442` + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-server-linux-amd64.tar.gz) | `a8e3d457e5bcc1c09eeb66111e8dd049d6ba048c3c0fa90a61814291afdcde93f1c6dbb07beef090d1d8a9958402ff843e9af23ae9f069c17c0a7c6ce4034686` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-server-linux-arm.tar.gz) | `4e17494767000256775e4dd33c0a9b2d152bd4b5fba9f343b6dfeb5746ff34e400a8e0aaf2153476453225ef57e4bb1ae3635416ab18f9e4dabf4e5cc82f8aaa` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-server-linux-arm64.tar.gz) | `0ddd0cf0ff56cebfa89efb1972cc2bc6916e824c2af56cfd330ac5638c8918eaf3c60d05714b220dbf4f896160eded123beeba42f5be55fe434a43d04508d86a` 
+[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-server-linux-ppc64le.tar.gz) | `b93828560224e812ed21b57fea5458fa8560745cfec96fc1677b258393c00e208ad9b99467b575e74e01699ffd75f03f5793675032e7306cba7208c1afb53c8d` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-server-linux-s390x.tar.gz) | `154d565329d5ba52cdb7c3d43d8854b7a9b8e34803c4df6b3e6ae74c1a6e255c78e6559b7546b9158df0e3f7931bbdaf43407d95cd875c79f5cce960bb9882dd` + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-node-linux-amd64.tar.gz) | `9d18ba5f0c3b09edcf29397a496a1e908f4906087be3792989285630d7bcbaf6cd3bdd7b07dace439823885acc808637190f5eaa240b7b4580acf277b67bb553` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-node-linux-arm.tar.gz) | `959b04ff7b8690413e01bffeabaab2119794dedf06b7aae1743e49988f797cb7e6ff12e1a91af2d4c5f664414f3aa4bd9020521c6a21c1196c194d12a6f7fe08` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-node-linux-arm64.tar.gz) | `b5c18e8c9e28cf276067c871446720d86b6f162e22c3a5e9343cdbc6857baa6961d09a6908b6acd1bbd132c2e2e526377676babf77b8d3bfb36f8711827c105a` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-node-linux-ppc64le.tar.gz) | `63e3504d3b115fdf3396968afafd1107b98e5a1a15b7c042a87f5a9cffbdc274f7b06b07ce90eb51876cfffd57cf7f20180bad7e9f9762af577e51f4f13d2f7a` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-node-linux-s390x.tar.gz) | `21c5c2721febf7fddeada9569f3ecbd059267e5d2cc325d98fb74faf1ae9e9e15899750225a1fc7c25feef96e7705b1456cb489f4882b9eb10e78bd0f590d019` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.13.0/kubernetes-node-windows-amd64.tar.gz) | `3e73d3ecff14b4c85a71bb6cf91b1ab7d9c3075c64bd5ce6863562ab17bf808b0cbc33ddd25346d25040649c1ad89745796afd218190886b54f1d8acc17896e4` + +# Kubernetes 1.13 Release Notes + +## 
Security Content + +- CVE-2018-1002105, a critical security issue in the Kubernetes API Server, is resolved in v1.13.0 (and in [v1.10.11](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.10.md/#v11011), [v1.11.5](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.11.md/#v1115), and [v1.12.3](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.12.md/#v1123)). We recommend all clusters running previous versions update to one of these releases immediately. See issue [#71411](https://github.com/kubernetes/kubernetes/issues/71411) for details. + +## Urgent Upgrade Notes + +### (No, really, you MUST do this before you upgrade) + +Before upgrading to Kubernetes 1.13, you must keep the following in mind: + +- kube-apiserver + - The deprecated `etcd2` storage backend has been removed. Before upgrading a kube-apiserver using `--storage-backend=etcd2`, etcd v2 data must be migrated to the v3 storage backend, and kube-apiserver invocations changed to use `--storage-backend=etcd3`. Please consult the installation procedure used to set up etcd for specific migration instructions. Backups prior to upgrade are always a good practice, but since the etcd2 to etcd3 migration is not reversible, an etcd backup prior to migration is essential. + - The deprecated `--etcd-quorum-read` flag has been removed. Quorum reads are now always enabled when fetching data from etcd. Remove the `--etcd-quorum-read` flag from kube-apiserver invocations before upgrading. +- kube-controller-manager + - The deprecated `--insecure-experimental-approve-all-kubelet-csrs-for-group` flag has been removed. +- kubelet + - The deprecated `--google-json-key` flag has been removed. Remove the `--google-json-key` flag from kubelet invocations before upgrading. 
([#69354](https://github.com/kubernetes/kubernetes/pull/69354), [@yujuhong](https://github.com/yujuhong)) + - DaemonSet pods now make use of scheduling features that require kubelets to be at 1.11 or above. Ensure all kubelets in the cluster are at 1.11 or above before upgrading kube-controller-manager to 1.13. + - The schema for the alpha `CSINodeInfo` CRD has been split into `spec` and `status` fields, and new fields `status.available` and `status.volumePluginMechanism` added. Clusters using the previous alpha schema must delete and recreate the CRD using the new schema. ([#70515](https://github.com/kubernetes/kubernetes/pull/70515), [@davidz627](https://github.com/davidz627)) +- kube-scheduler dropped support for configuration files with apiVersion `componentconfig/v1alpha1`. Ensure kube-scheduler is configured using command-line flags or a configuration file with apiVersion `kubescheduler.config.k8s.io/v1alpha1` before upgrading to 1.13. +- kubectl + - The deprecated command `run-container` has been removed. Invocations should use `kubectl run` instead ([#70728](https://github.com/kubernetes/kubernetes/pull/70728), [@Pingan2017](https://github.com/Pingan2017)) +- client-go releases will no longer have bootstrap (k8s.io/client-go/tools/bootstrap) related code. Any reference to it will break. Please redirect all references to k8s.io/bootstrap instead. ([#67356](https://github.com/kubernetes/kubernetes/pull/67356), [@yliaog](https://github.com/yliaog)) +- Kubernetes cannot distinguish between GCE Zonal PDs and Regional PDs with the same name. To workaround this issue, precreate PDs with unique names. PDs that are dynamically provisioned do not encounter this issue. ([#70716](https://github.com/kubernetes/kubernetes/pull/70716), [@msau42](https://github.com/msau42)) + +## Known Issues + +- If kubelet plugin registration for a driver fails, kubelet will not retry. 
The driver must delete and recreate the driver registration socket in order to force kubelet to attempt registration again. Restarting only the driver container may not be sufficient to trigger recreation of the socket, instead a pod restart may be required. ([#71487](https://github.com/kubernetes/kubernetes/issues/71487)) +- In some cases, a Flex volume resize may leave a PVC with erroneous Resizing condition even after volume has been successfully expanded. Users may choose to delete the condition, but it is not required. ([#71470](https://github.com/kubernetes/kubernetes/issues/71470)) +- The CSI driver-registrar external sidecar container v1.0.0-rc2 is known to take up to 1 minute to start in some cases. We expect this issue to be resolved in a future release of the sidecar container. For verification, please see the release notes of future releases of the external sidecar container. ([#76](https://github.com/kubernetes-csi/driver-registrar/issues/76)) +- When using IPV6-only, be sure to use `proxy-mode=iptables` as `proxy-mode=ipvs` is known to not work. ([#68437](https://github.com/kubernetes/kubernetes/issues/68437)) + +## Deprecations + +- kube-apiserver + - The `--service-account-api-audiences` flag is deprecated in favor of `--api-audiences`. The old flag is accepted with a warning but will be removed in a future release. ([#70105](https://github.com/kubernetes/kubernetes/pull/70105), [@mikedanese](https://github.com/mikedanese)) + - The `--experimental-encryption-provider-config` flag is deprecated in favor of `--encryption-provider-config`. The old flag is accepted with a warning but will be removed in 1.14. 
([#71206](https://github.com/kubernetes/kubernetes/pull/71206), [@stlaz](https://github.com/stlaz)) + - As part of graduating the etcd encryption feature to beta, the configuration file referenced by `--encryption-provider-config` now uses `kind: EncryptionConfiguration` and `apiVersion: apiserver.config.k8s.io/v1`. Support for `kind: EncryptionConfig` and `apiVersion: v1` is deprecated and will be removed in a future release. ([#67383](https://github.com/kubernetes/kubernetes/pull/67383), [@stlaz](https://github.com/stlaz)) + - The `--deserialization-cache-size` flag is deprecated, and will be removed in a future release. The flag is inactive since the etcd2 storage backend was removed. ([#69842](https://github.com/kubernetes/kubernetes/pull/69842), [@liggitt](https://github.com/liggitt)) + - The `Node` authorization mode no longer allows kubelets to delete their Node API objects (prior to 1.11, in rare circumstances related to cloudprovider node ID changes, kubelets would attempt to delete/recreate their Node object at startup) ([#71021](https://github.com/kubernetes/kubernetes/pull/71021), [@liggitt](https://github.com/liggitt)) + - The built-in `system:csi-external-provisioner` and `system:csi-external-attacher` cluster roles are deprecated and will not be auto-created in a future release. CSI deployments should provide their own RBAC role definitions with required permissions. ([#69868](https://github.com/kubernetes/kubernetes/pull/69868), [@pohly]( https://github.com/pohly)) + - The built-in `system:aws-cloud-provider` cluster role is deprecated and will not be auto-created in a future release. Deployments using the AWS cloud provider should grant required permissions to the `aws-cloud-provider` service account in the `kube-system` namespace as part of deployment. 
([#66635](https://github.com/kubernetes/kubernetes/pull/66635), [@wgliang](https://github.com/wgliang)) +- kubelet + - Use of the beta plugin registration directory `{kubelet_root_dir}/plugins/` for registration of external drivers via the kubelet plugin registration protocol is deprecated in favor of `{kubelet_root_dir}/plugins_registry/`. Support for the old directory is planned to be removed in v1.15. Device plugin and CSI storage drivers should switch to the new directory prior to v1.15. Only CSI storage drivers that support 0.x versions of the CSI API are allowed in the old directory. ([#70494](https://github.com/kubernetes/kubernetes/pull/70494) by [@RenaudWasTaken](https://github.com/RenaudWasTaken) and [#71314](https://github.com/kubernetes/kubernetes/pull/71314) by [@saad-ali](https://github.com/saad-ali)) + - With the release of the CSI 1.0 API, support for CSI drivers using 0.3 and older releases of the CSI API is deprecated, and is planned to be removed in Kubernetes v1.15. CSI drivers should be updated to support the CSI 1.0 API, and deployed in the new kubelet plugin registration directory (`{kubelet_root_dir}/plugins_registry/`) once all nodes in the cluster are at 1.13 or higher ([#71020](https://github.com/kubernetes/kubernetes/pull/71020) and [#71314](https://github.com/kubernetes/kubernetes/pull/71314), both by [@saad-ali](https://github.com/saad-ali)) + - Use of the `--node-labels` flag to set labels under the `kubernetes.io/` and `k8s.io/` prefix will be subject to restriction by the `NodeRestriction` admission plugin in future releases. [See admission plugin documentation](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#noderestriction) for allowed labels. 
([#68267](https://github.com/kubernetes/kubernetes/pull/68267), [@liggitt](https://github.com/liggitt)) +- kube-scheduler + - The alpha critical pod annotation (`scheduler.alpha.kubernetes.io/critical-pod`) is deprecated. Pod priority should be used instead to mark pods as critical. ([#70298](https://github.com/kubernetes/kubernetes/pull/70298), [@bsalamat](https://github.com/bsalamat)) +- The following features are now GA, and the associated feature gates are deprecated and will be removed in a future release: + - CSIPersistentVolume + - GCERegionalPersistentDisk + - KubeletPluginsWatcher + - VolumeScheduling +- kubeadm + - The DynamicKubeletConfig feature gate is deprecated. The functionality is still accessible by using the kubeadm alpha kubelet enable-dynamic command. + - The command `kubeadm config print-defaults` is deprecated in favor of `kubeadm config print init-defaults` and `kubeadm config print join-defaults` ([#69617](https://github.com/kubernetes/kubernetes/pull/69617), [@rosti](https://github.com/rosti)) + - support for the `v1alpha3` configuration file format is deprecated and will be removed in 1.14. Use `kubeadm config migrate` to migrate `v1alpha3` configuration files to `v1beta1`, which provides improvements in image repository management, addons configuration, and other areas. 
The documentation for `v1beta1` can be found here: https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1 +- The `node.status.volumes.attached.devicePath` field is deprecated for CSI volumes and will not be set in future releases ([#71095](https://github.com/kubernetes/kubernetes/pull/71095), [@msau42](https://github.com/msau42)) +- kubectl + - The `kubectl convert` command is deprecated and will be removed in a future release ([#70820](https://github.com/kubernetes/kubernetes/pull/70820), [@seans3](https://github.com/seans3)) +- Support for passing unknown provider names to the E2E test binaries is deprecated and will be removed in a future release. Use `--provider=skeleton` (no ssh access) or `--provider=local` (local cluster with ssh) instead. ([#70141](https://github.com/kubernetes/kubernetes/pull/70141), [@pohly](https://github.com/pohly)) + +## Major Themes + +### SIG API Machinery + +For the 1.13 release, SIG API Machinery is happy to announce that the [dry-run functionality](https://kubernetes.io/docs/reference/using-api/api-concepts/#dry-run) is now beta. + +### SIG Auth + +With this release we've made several important enhancements to core SIG Auth areas. In the authorization category, we've further reduced Kubelet privileges by [restricting node self-updates of labels to a whitelisted selection and by disallowing kubelets from deleting their Node API object](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#noderestriction). In authentication, we added alpha-level support for automounting improved service account tokens through projected volumes. We also enabled [audience validation in TokenReview](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.13/#tokenreview-v1-authentication-k8s-io) for the new tokens for improved scoping. 
Under audit logging, the new alpha-level "dynamic audit configuration" adds support for [dynamically registering webhooks to receive a stream of audit events](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/#dynamic-backend). Finally, we've enhanced secrets protection by graduating [etcd encryption](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/) out of experimental. + +### SIG AWS + +In v1.13 we worked on tighter integrations of Kubernetes API objects with AWS services. These include three out-of-tree alpha feature releases: + +1) Alpha for AWS ALB (Application Load Balancer) integration to Kubernetes Ingress resources. +2) Alpha for CSI specification 0.3 integration to AWS EBS (Elastic Block Store) +3) Alpha for the cloudprovider-aws cloud controller manager binary. Additionally we added [aws-k8s-tester](https://github.com/kubernetes/test-infra/issues/9814), deployer interface for kubetest, to the test-infra repository. This plugin allowed us to integrate Prow to the 3 subprojects defined above in order to provide CI signal for all 3 features. The CI signal is visible [here](https://testgrid.k8s.io/) under SIG-AWS. + +For detailed release notes on the three alpha features from SIG AWS, please refer to the following Changelogs: + +- [aws-alb-ingress-controller v1.0.0](https://github.com/kubernetes-sigs/aws-alb-ingress-controller/releases/tag/v1.0.0) +- [aws-ebs-csi-driver v0.1](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/blob/master/CHANGELOG-0.1.md) +- [cloudprovider-aws external v0.1.0](https://github.com/kubernetes/cloud-provider-aws/blob/master/changelogs/CHANGELOG-0.1.md) + +### SIG Azure + +For 1.13 SIG Azure was focused on adding additional Azure Disk support for Ultra SSD, Standard SSD, and Premium Azure Files. Azure Availability Zones and cross resource group nodes were also moved from Alpha to Beta in 1.13. 
+ +### SIG Big Data + +During the 1.13 release cycle, SIG Big Data has been focused on community engagements relating to 3rd-party project integrations with Kubernetes. There have been no impacts on the 1.13 release. + +### SIG CLI + +Over the course of 1.13 release SIG CLI mostly focused on stabilizing the items we’ve been working on over the past releases such as server-side printing and its support in kubectl, as well as finishing [kubectl diff which is based on server-side dry-run feature](https://kubernetes.io/docs/concepts/overview/object-management-kubectl/#how-to-create-objects). We’ve continued separating kubectl code to prepare for extraction out of main repository. Finally, thanks to the awesome support and feedback from community we’ve managed to promote the new [plugin mechanism to Beta](https://kubernetes.io/docs/tasks/extend-kubectl/kubectl-plugins/). + +### SIG Cloud Provider + +For v1.13, SIG Cloud Provider has been focused on stabilizing the common APIs and interfaces consumed by cloud providers today. This involved auditing the cloud provider APIs for anything that should be deprecated as well as adding changes where necessary. In addition, SIG Cloud Provider has begun exploratory work around having a “cloud provider” e2e test suite which can be used to test common cloud provider functionalities with resources such as nodes and load balancers. + +We are also continuing our long running effort to extract all the existing cloud providers that live in k8s.io/kubernetes into their own respective repos. Along with this migration, we are slowly transitioning users to use the cloud-controller-manager for any cloud provider features instead of the kube-controller-manager. + +### SIG Cluster Lifecycle + +For 1.13 SIG Cluster Lifecycle is pleased to announce the long awaited promotion of kubeadm to stable GA, and the promotion of kubeadm’s configuration API to `v1beta1`. 
+In this release the SIG again focused on further improving the user experience on cluster creation and also fixing a number of bugs and other assorted improvements. + +Some notable changes in kubeadm since Kubernetes 1.12: + +- kubeadm’s configuration API is now `v1beta1`. The new configuration format provides improvements in image repository management, addons configuration, and other areas. We encourage `v1alpha3` users to migrate to this configuration API using `kubeadm config migrate`, as `v1alpha3` will be removed in 1.14. The documentation for `v1beta1` can be found here: https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1 +- kubeadm has graduated `kubeadm alpha phase` commands to `kubeadm init phase`. This means that the phases of creating a control-plane node are now tightly integrated as part of the `init` command. Alpha features, not yet ready for GA are still kept under `kubeadm alpha` and we appreciate feedback on them. +- `kubeadm init` and `kubeadm init phase` now have a `--image-repository` flag, improving support for environments with limited access to the official Kubernetes repository. +- The DynamicKubeletConfig and SelfHosting functionality was moved outside of `kubeadm init` and feature gates and is now exposed under `kubeadm alpha`. +- Kubeadm init phase certs now support the `--csr-only` option, simplifying custom CA creation. +- `kubeadm join --experimental-control-plane` now automatically adds a new etcd member for `local etcd` mode, further simplifying required tasks for HA clusters setup. +- Improvements were made to `kubeadm reset` related to cleaning etcd and notifying the user about the state of iptables. +- kubeadm commands now print warnings if input YAML documents contain unknown or duplicate fields. +- kubeadm now properly recognizes Docker 18.09.0 and newer, but still treats 18.06 as the default supported version. 
+- kubeadm now automatically sets the `--pod-infra-container-image` flag when starting the kubelet. + +### SIG IBM Cloud + +The IBM Cloud SIG was focused on defining its charter and working towards moving its cloud provider code to an external repository with a goal to have this work done by the end of Kubernetes 1.14 release cycle. In the SIG meetings, we also made sure to share updates on the latest Kubernetes developments in the IBM Cloud like the availability of Kubernetes v1.12.2 in the IBM Cloud Kubernetes Service (IKS). The SIG updates were provided in the Kubernetes community weekly call and at the KubeCon China 2018. + +### SIG Multicluster + +Moving Federation v2 from Alpha towards Beta has been the focus of our effort over the past quarter. To this end we engaged with end users, and successfully enlisted additional contributors from companies including IBM, Amadeus, Cisco and others. Federation v2 provides a suite of decoupled API’s and re-usable components for building multi-cluster control planes. We plan to start releasing Beta components in late 2018. In addition, more minor updates were made to our cluster-registry and multi-cluster ingress sub-projects. + +### SIG Network + +For 1.13, the areas of focus were in IPv6, DNS improvements and some smaller items: +CoreDNS is now the default cluster DNS passing all of the scale/resource usage tests +Node-local DNS cache feature is available in Alpha. This feature deploys a lightweight DNS caching Daemonset that avoids the conntrack and converts queries from UDP to more reliable TCP. +PodReady++ feature now has `kubectl` CLI support. + +Progress was made towards finalizing the IPv6 dual stack support KEP and support for topological routing of services. + +### SIG Node + +SIG Node focused on stability and performance improvements in the 1.13 release. A new alpha feature is introduced to improve the mechanism that nodes heartbeat back to the control plane. 
The `NodeLease` feature results in the node using a `Lease` resource in the `kube-node-lease` namespace that is renewed periodically. The `NodeStatus` that was used previously to heartbeat back to the control plane is only updated when it changes. This reduces load on the control plane for large clusters. The Kubelet plugin registration mechanism, which enables automatic discovery of external plugins (including CSI and device plugins) has been promoted to stable in this release (introduced as alpha in 1.11 and promoted to beta in 1.12). + +### SIG Openstack + +The major theme for the SIG OpenStack release is the work-in-progress for removing the in-tree provider. This work, being done in conjunction with SIG Cloud Provider, is focusing on moving internal APIs that the OpenStack (and other providers) depends upon to staging to guarantee API stability. This work also included abstracting the in-tree Cinder API and refactoring code to the external Cinder provider to remove additional Cinder volume provider code. + +Additional work was also done to implement an OpenStack driver for the Cluster API effort led by SIG Cluster Lifecycle. For the external Cloud-Provider-OpenStack code, the SIG largely focused on bug fixes and updates to match K8s 1.13 development. + +### SIG Scalability + +SIG Scalability has mostly focused on stability and deflaking our tests, investing into framework for writing scalability tests (ClusterLoader v2) with a goal to migrate all tests to it by the end of 2018 and on the work towards extending definition of Kubernetes scalability by providing more/better user-friendly SLIs/SLOs. + +### SIG Scheduling + +SIG Scheduling has mostly focused on stability in 1.13 and has postponed some of the major features to the next versions. There are still two notable changes: 1. TaintBasedEviction is moved to Beta and will be enabled by default. 
With this feature enabled, condition taints are automatically added to the nodes and pods can add tolerations for them if needed. 2. Pod critical annotation is deprecated. Pods should use pod priority instead of the annotation. + +It is worth noting again that kube-scheduler will use apiVersion `kubescheduler.config.k8s.io/v1alpha1` instead of `componentconfig/v1alpha1` in its configuration files in 1.13. + +### SIG Service Catalog + +The Service Plan Defaults feature is still under active development. +We continue to improve the UX for the svcat CLI, specifically filling in gaps for the new Namespaced Service Broker feature. + +### SIG Storage + +Over the last year, SIG Storage has been focused on adding support for the Container Storage Interface (CSI) to Kubernetes. The specification recently moved to 1.0, and on the heels of this achievement, Kubernetes v1.13 moves CSI support for PersistentVolumes to GA. + +With CSI the Kubernetes volume layer becomes truly extensible, allowing third party storage developers to write drivers making their storage systems available in Kubernetes without having to touch the core code. + +CSI was first introduced as alpha in Kubernetes v1.9 and moved to beta in Kubernetes v1.10. + +You can find a list of sample and production drivers in the [CSI Documentation](https://kubernetes.io/docs/concepts/storage/volumes/#csi). + +SIG Storage also moves support for Block Volumes to beta (introduced as alpha in v1.9) and support for Topology Aware Volume Scheduling to stable (introduced as alpha in v1.9 and promoted to beta in 1.10). + +### SIG UI + +The migration to the newest version of Angular is still under active development as it is the most important thing on the roadmap at the moment. We are getting closer to the new release. We continue fixing bugs and adding other improvements. 
+ +### SIG VMWare + +Major focus for SIG VMware for this release is the work on moving internal APIs that the vSphere provider depends upon to staging to guarantee API stability. This work is being done in conjunction with SIG Cloud Provider and includes the creation of a brand new vsphere-csi plugin to replace the current volume functionalities in-tree. + +Additional work was also done to implement a vSphere provider for the Cluster API effort led by SIG Cluster Lifecycle. For the out-of-tree vSphere cloud provider, the SIG largely focused on bug fixes and updates to match K8s 1.13 development. + +### SIG Windows + +SIG Windows focused on improving reliability for Windows and Kubernetes support. + +## New Features + +- kubelet: When node lease feature is enabled, kubelet reports node status to api server only if there is some change or it didn't report over last report interval. ([#69753](https://github.com/kubernetes/kubernetes/pull/69753), [@wangzhen127](https://github.com/wangzhen127)) +- vSphereVolume implements Raw Block Volume Support ([#68761](https://github.com/kubernetes/kubernetes/pull/68761), [@fanzhangio](https://github.com/fanzhangio)) +- CRD supports multi-version Schema, Subresources and AdditionalPrintColumns (NOTE that CRDs created prior to 1.13 populated the top-level additionalPrinterColumns field by default. To apply an update that changes to per-version additionalPrinterColumns, the top-level additionalPrinterColumns field must be explicitly set to null). ([#70211](https://github.com/kubernetes/kubernetes/pull/70211), [@roycaihw](https://github.com/roycaihw)) +- New addon in addon manager that automatically installs CSI CRDs if CSIDriverRegistry or CSINodeInfo feature gates are true. 
([#70193](https://github.com/kubernetes/kubernetes/pull/70193), [@saad-ali](https://github.com/saad-ali)) +- Delegated authorization can now allow unrestricted access for `system:masters` like the main kube-apiserver ([#70671](https://github.com/kubernetes/kubernetes/pull/70671), [@deads2k](https://github.com/deads2k)) +- Added dns capabilities for Windows CNI plugins: ([#67435](https://github.com/kubernetes/kubernetes/pull/67435), [@feiskyer](https://github.com/feiskyer)) +- kube-apiserver: `--audit-webhook-version` and `--audit-log-version` now default to `audit.k8s.io/v1` if unspecified ([#70476](https://github.com/kubernetes/kubernetes/pull/70476), [@charrywanganthony](https://github.com/charrywanganthony)) +- kubeadm: timeoutForControlPlane is introduced as part of the API Server config, that controls the timeout for the wait for control plane to be up. Default value is 4 minutes. ([#70480](https://github.com/kubernetes/kubernetes/pull/70480), [@rosti](https://github.com/rosti)) +- `--api-audiences` now defaults to the `--service-account-issuer` if the issuer is provided but the API audience is not. 
([#70308](https://github.com/kubernetes/kubernetes/pull/70308), [@mikedanese](https://github.com/mikedanese)) +- Added support for projected volume in describe function ([#70158](https://github.com/kubernetes/kubernetes/pull/70158), [@WanLinghao](https://github.com/WanLinghao)) +- kubeadm now automatically creates a new stacked etcd member when joining a new control plane node (does not applies to external etcd) ([#69486](https://github.com/kubernetes/kubernetes/pull/69486), [@fabriziopandini](https://github.com/fabriziopandini)) +- Display the usage of ephemeral-storage when using `kubectl describe node` ([#70268](https://github.com/kubernetes/kubernetes/pull/70268), [@Pingan2017](https://github.com/Pingan2017)) +- Added functionality to enable br_netfilter and ip_forward for debian packages to improve kubeadm support for CRI runtime besides Docker. ([#70152](https://github.com/kubernetes/kubernetes/pull/70152), [@ashwanikhemani](https://github.com/ashwanikhemani)) +- Added regions ap-northeast-3 and eu-west-3 to the list of well known AWS regions. ([#70252](https://github.com/kubernetes/kubernetes/pull/70252), [@nckturner](https://github.com/nckturner)) +- kubeadm: Implemented preflight check to ensure that number of CPUs ([#70048](https://github.com/kubernetes/kubernetes/pull/70048), [@bart0sh](https://github.com/bart0sh)) +- CoreDNS is now the default DNS server in kube-up deployments. ([#69883](https://github.com/kubernetes/kubernetes/pull/69883), [@chrisohaver](https://github.com/chrisohaver)) +- Opt out of chowning and chmoding from kubectl cp. 
([#69573](https://github.com/kubernetes/kubernetes/pull/69573), [@bjhaid](https://github.com/bjhaid)) +- Failed to provision volume with StorageClass "azurefile-premium": failed to create share andy-mg1121-dynamic-pvc-1a7b2813-d1b7-11e8-9e96-000d3a03e16b in account f7228f99bcde411e8ba4900: failed to create file share, err: storage: service returned error: StatusCode=400, ErrorCode=InvalidHeaderValue, ErrorMessage=The value for one of the HTTP headers is not in the correct format. ([#69718](https://github.com/kubernetes/kubernetes/pull/69718), [@andyzhangx](https://github.com/andyzhangx)) +- `TaintBasedEvictions` feature is promoted to beta. ([#69824](https://github.com/kubernetes/kubernetes/pull/69824), [@Huang-Wei](https://github.com/Huang-Wei)) +- Fixed https://github.com/kubernetes/client-go/issues/478 by adding support for JSON Patch in client-go/dynamic/fake ([#69330](https://github.com/kubernetes/kubernetes/pull/69330), [@vaikas-google](https://github.com/vaikas-google)) +- Dry-run is promoted to Beta and will be enabled by default. ([#69644](https://github.com/kubernetes/kubernetes/pull/69644), [@apelisse](https://github.com/apelisse)) +- `kubectl get priorityclass` now prints value column by default. ([#69431](https://github.com/kubernetes/kubernetes/pull/69431), [@Huang-Wei](https://github.com/Huang-Wei)) +- Added a new container based image for running e2e tests ([#69368](https://github.com/kubernetes/kubernetes/pull/69368), [@dims](https://github.com/dims)) +- The `LC_ALL` and `LC_MESSAGES` env vars can now be used to set desired locale for `kubectl` while keeping `LANG` unchanged. ([#69500](https://github.com/kubernetes/kubernetes/pull/69500), [@m1kola](https://github.com/m1kola)) +- NodeLifecycleController: Now node lease renewal is treated as the heartbeat signal from the node, in addition to NodeStatus Update. 
([#69241](https://github.com/kubernetes/kubernetes/pull/69241), [@wangzhen127](https://github.com/wangzhen127)) +- Added dynamic shared informers to write generic, non-generated controllers ([#69308](https://github.com/kubernetes/kubernetes/pull/69308), [@p0lyn0mial](https://github.com/p0lyn0mial)) +- Upgraded to etcd 3.3 client ([#69322](https://github.com/kubernetes/kubernetes/pull/69322), [@jpbetz](https://github.com/jpbetz)) +- It is now possible to use named ports in the `kubectl port-forward` command ([#69477](https://github.com/kubernetes/kubernetes/pull/69477), [@m1kola](https://github.com/m1kola)) +- `kubectl wait` now supports condition value checks other than true using `--for condition=available=false` ([#69295](https://github.com/kubernetes/kubernetes/pull/69295), [@deads2k](https://github.com/deads2k)) +- Updated defaultbackend image to 1.5. Users should concentrate on updating scripts to the new version. ([#69120](https://github.com/kubernetes/kubernetes/pull/69120), [@aledbf](https://github.com/aledbf)) +- Bumped Dashboard version to v1.10.0 ([#68450](https://github.com/kubernetes/kubernetes/pull/68450), [@jeefy](https://github.com/jeefy)) +- Added env variables to control CPU requests of kube-controller-manager and kube-scheduler. ([#68823](https://github.com/kubernetes/kubernetes/pull/68823), [@loburm](https://github.com/loburm)) +- PodSecurityPolicy objects now support a `MayRunAs` rule for `fsGroup` and `supplementalGroups` options. This allows specifying ranges of allowed GIDs for pods/containers without forcing a default GID the way `MustRunAs` does. This means that a container to which such a policy applies to won't use any fsGroup/supplementalGroup GID if not explicitly specified, yet a specified GID must still fall in the GID range according to the policy. 
([#65135](https://github.com/kubernetes/kubernetes/pull/65135), [@stlaz](https://github.com/stlaz)) +- Upgrade Stackdriver Logging Agent addon image to 0.6-1.6.0-1 to use Fluentd v1.2. This provides nanoseconds timestamp granularity for logs. ([#70954](https://github.com/kubernetes/kubernetes/pull/70954), [@qingling128](https://github.com/qingling128)) +- When the BoundServiceAccountTokenVolumes Alpha feature is enabled, ServiceAccount volumes now use a projected volume source and their names have the prefix "kube-api-access". ([#69848](https://github.com/kubernetes/kubernetes/pull/69848), [@mikedanese](https://github.com/mikedanese)) +- Raw block volume support is promoted to beta, and enabled by default. This is accessible via the `volumeDevices` container field in pod specs, and the `volumeMode` field in persistent volume and persistent volume claims definitions. ([#71167](https://github.com/kubernetes/kubernetes/pull/71167), [@msau42](https://github.com/msau42)) +- TokenReview now supports audience validation of tokens with audiences other than the kube-apiserver. ([#62692](https://github.com/kubernetes/kubernetes/pull/62692), [@mikedanese](https://github.com/mikedanese)) +- StatefulSet is supported in `kubectl autoscale` command ([#71103](https://github.com/kubernetes/kubernetes/pull/71103), [@Pingan2017](https://github.com/Pingan2017)) +- Kubernetes v1.13 moves support for Container Storage Interface to GA. As part of this move Kubernetes now supports CSI v1.0.0 and deprecates support for CSI 0.3 and older releases. Older CSI drivers must be updated to CSI 1.0 and moved to the new kubelet plugin registration directory in order to work with Kubernetes 1.15+. 
([#71020](https://github.com/kubernetes/kubernetes/pull/71020), [@saad-ali](https://github.com/saad-ali)) +- Added option to create CSRs instead of certificates for kubeadm init phase certs and kubeadm alpha certs renew ([#70809](https://github.com/kubernetes/kubernetes/pull/70809), [@liztio](https://github.com/liztio)) +- Added a kubelet socket which serves a gRPC service containing the devices used by containers on the node. ([#70508](https://github.com/kubernetes/kubernetes/pull/70508), [@dashpole](https://github.com/dashpole)) +- Added DynamicAuditing feature which allows for the configuration of audit webhooks through the use of an AuditSink API object. ([#67257](https://github.com/kubernetes/kubernetes/pull/67257), [@pbarker](https://github.com/pbarker)) +- The kube-apiserver's healthz now takes in an optional query parameter which allows you to disable health checks from causing healthz failures. ([#70676](https://github.com/kubernetes/kubernetes/pull/70676), [@logicalhan](https://github.com/logicalhan)) +- Introduced support for running a nodelocal dns cache. It is disabled by default, can be enabled by setting KUBE_ENABLE_NODELOCAL_DNS=true ([#70555](https://github.com/kubernetes/kubernetes/pull/70555), [@prameshj](https://github.com/prameshj)) +- Added readiness gates in extended output for pods ([#70775](https://github.com/kubernetes/kubernetes/pull/70775), [@freehan](https://github.com/freehan)) +- Added `Ready` column and improve human-readable output of Deployments and StatefulSets ([#70466](https://github.com/kubernetes/kubernetes/pull/70466), [@Pingan2017](https://github.com/Pingan2017)) +- Added `kubelet_container_log_size_bytes` metric representing the log file size of a container. 
([#70749](https://github.com/kubernetes/kubernetes/pull/70749), [@brancz](https://github.com/brancz)) +- NodeLifecycleController: When node lease feature is enabled, node lease will be deleted when the corresponding node is deleted. ([#70034](https://github.com/kubernetes/kubernetes/pull/70034), [@wangzhen127](https://github.com/wangzhen127)) +- GCERegionalPersistentDisk feature is GA now! ([#70716](https://github.com/kubernetes/kubernetes/pull/70716), [@jingxu97](https://github.com/jingxu97)) +- Added secure port 10259 to the kube-scheduler (enabled by default) and deprecate old insecure port 10251. Without further flags self-signed certs are created on startup in memory. ([#69663](https://github.com/kubernetes/kubernetes/pull/69663), [@sttts](https://github.com/sttts)) + +## Release Notes From SIGs + +### SIG API Machinery + +- The OwnerReferencesPermissionEnforcement admission plugin now checks authorization for the correct scope (namespaced or cluster-scoped) of the owner resource type. Previously, it always checked permissions at the same scope as the child resource. ([#70389](https://github.com/kubernetes/kubernetes/pull/70389), [@caesarxuchao](https://github.com/caesarxuchao)) +- OpenAPI spec now correctly marks delete request's body parameter as optional ([#70032](https://github.com/kubernetes/kubernetes/pull/70032), [@iamneha](https://github.com/iamneha)) +- The rules for incrementing `metadata.generation` of custom resources changed: ([#69059](https://github.com/kubernetes/kubernetes/pull/69059), [@caesarxuchao](https://github.com/caesarxuchao)) + - If the custom resource participates the spec/status convention, the metadata.generation of the CR increments when there is any change, except for the changes to the metadata or the changes to the status. 
+ - If the custom resource does not participate the spec/status convention, the metadata.generation of the CR increments when there is any change to the CR, except for changes to the metadata. + - A custom resource is considered to participate the spec/status convention if and only if the "CustomResourceSubresources" feature gate is turned on and the CRD has `.spec.subresources.status={}`. +- Fixed patch/update operations on multi-version custom resources ([#70087](https://github.com/kubernetes/kubernetes/pull/70087), [@liggitt](https://github.com/liggitt)) +- Reduced memory utilization of admission webhook metrics by removing resource related labels. ([#69895](https://github.com/kubernetes/kubernetes/pull/69895), [@jpbetz](https://github.com/jpbetz)) +- Kubelet can now parse PEM file containing both TLS certificate and key in arbitrary order. Previously key was always required to be first. ([#69536](https://github.com/kubernetes/kubernetes/pull/69536), [@awly](https://github.com/awly)) +- Code-gen: Removed lowercasing for project imports ([#68484](https://github.com/kubernetes/kubernetes/pull/68484), [@jsturtevant](https://github.com/jsturtevant)) +- Fixed client cert setup in delegating authentication logic ([#69430](https://github.com/kubernetes/kubernetes/pull/69430), [@DirectXMan12](https://github.com/DirectXMan12)) +- OpenAPI spec and API reference now reflect dryRun query parameter for POST/PUT/PATCH operations ([#69359](https://github.com/kubernetes/kubernetes/pull/69359), [@roycaihw](https://github.com/roycaihw)) +- Fixed the sample-apiserver so that its BanFlunder admission plugin can be used. 
([#68417](https://github.com/kubernetes/kubernetes/pull/68417), [@MikeSpreitzer](https://github.com/MikeSpreitzer)) +- APIService availability related to networking glitches are corrected faster ([#68678](https://github.com/kubernetes/kubernetes/pull/68678), [@deads2k](https://github.com/deads2k)) +- Fixed an issue with stuck connections handling error responses ([#71412](https://github.com/kubernetes/kubernetes/pull/71412), [@liggitt](https://github.com/liggitt)) +- apiserver: fixed handling and logging of panics in REST handlers ([#71076](https://github.com/kubernetes/kubernetes/pull/71076), [@liggitt](https://github.com/liggitt)) +- kube-controller-manager no longer removes ownerReferences from ResourceQuota objects ([#70035](https://github.com/kubernetes/kubernetes/pull/70035), [@liggitt](https://github.com/liggitt)) +- "unfinished_work_microseconds" is added to the workqueue metrics; it can be used to detect stuck worker threads. (kube-controller-manager runs many workqueues.) ([#70884](https://github.com/kubernetes/kubernetes/pull/70884), [@lavalamp](https://github.com/lavalamp)) +- Timeouts set in ListOptions for clients are now also respected locally ([#70998](https://github.com/kubernetes/kubernetes/pull/70998), [@deads2k](https://github.com/deads2k)) +- Added support for CRD conversion webhook ([#67006](https://github.com/kubernetes/kubernetes/pull/67006), [@mbohlool](https://github.com/mbohlool)) +- client-go: fixed sending oversized data frames to spdystreams in remotecommand.NewSPDYExecutor ([#70999](https://github.com/kubernetes/kubernetes/pull/70999), [@liggitt](https://github.com/liggitt)) +- Fixed missing flags in `kube-controller-manager --help`. 
([#71298](https://github.com/kubernetes/kubernetes/pull/71298), [@stewart-yu](https://github.com/stewart-yu)) +- Fixed missing flags in `kube-apiserver --help`. ([#70204](https://github.com/kubernetes/kubernetes/pull/70204), [@imjching](https://github.com/imjching)) +- The caBundle and service fields in admission webhook API objects now correctly indicate they are optional ([#70138](https://github.com/kubernetes/kubernetes/pull/70138), [@liggitt](https://github.com/liggitt)) +- Fixed an issue with stuck connections handling error responses ([#71419](https://github.com/kubernetes/kubernetes/pull/71419), [@liggitt](https://github.com/liggitt)) +- kube-controller-manager and cloud-controller-manager now hold generated serving certificates in-memory unless a writeable location is specified with --cert-dir ([#69884](https://github.com/kubernetes/kubernetes/pull/69884), [@liggitt](https://github.com/liggitt)) +- CCM server will not listen insecurely if secure port is specified ([#68982](https://github.com/kubernetes/kubernetes/pull/68982), [@aruneli](https://github.com/aruneli)) +- List operations against the API now return internal server errors instead of partially complete lists when a value cannot be transformed from storage. The updated behavior is consistent with all other operations that require transforming data from storage such as watch and get. ([#69399](https://github.com/kubernetes/kubernetes/pull/69399), [@mikedanese](https://github.com/mikedanese)) + +### SIG Auth + +- API Server can be configured to reject requests that cannot be audit-logged. ([#65763](https://github.com/kubernetes/kubernetes/pull/65763), [@x13n](https://github.com/x13n)) +- Go clients created from a kubeconfig that specifies a TokenFile now periodically reload the token from the specified file. 
([#70606](https://github.com/kubernetes/kubernetes/pull/70606), [@mikedanese](https://github.com/mikedanese)) +- When `--rotate-server-certificates` is enabled, kubelet will no longer request a new certificate on startup if the current certificate on disk is satisfactory. ([#69991](https://github.com/kubernetes/kubernetes/pull/69991), [@agunnerson-ibm](https://github.com/agunnerson-ibm)) +- Added dynamic audit configuration api ([#67547](https://github.com/kubernetes/kubernetes/pull/67547), [@pbarker](https://github.com/pbarker)) +- Added ability to control primary GID of containers through Pod Spec and PodSecurityPolicy ([#67802](https://github.com/kubernetes/kubernetes/pull/67802), [@krmayankk](https://github.com/krmayankk)) +- kube-apiserver: the `NodeRestriction` admission plugin now prevents kubelets from modifying `Node` labels prefixed with `node-restriction.kubernetes.io/`. The `node-restriction.kubernetes.io/` label prefix is reserved for cluster administrators to use for labeling `Node` objects to target workloads to nodes in a way that kubelets cannot modify or spoof. ([#68267](https://github.com/kubernetes/kubernetes/pull/68267), [@liggitt](https://github.com/liggitt)) + +### SIG Autoscaling + +- Updated Cluster Autoscaler version to 1.13.0. See the [Release Notes](https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.13.0) for more information. 
([#71513](https://github.com/kubernetes/kubernetes/pull/71513), [@losipiuk](https://github.com/losipiuk)) + +### SIG AWS + +- `service.beta.kubernetes.io/aws-load-balancer-internal` now supports true and false values, previously it only supported non-empty strings ([#69436](https://github.com/kubernetes/kubernetes/pull/69436), [@mcrute](https://github.com/mcrute)) +- Added `service.beta.kubernetes.io/aws-load-balancer-security-groups` annotation to set the security groups to the AWS ELB to be the only ones specified in the annotation in case this is present (does not add `0.0.0.0/0`). ([#62774](https://github.com/kubernetes/kubernetes/pull/62774), [@Raffo](https://github.com/Raffo)) + +### SIG Azure + +- Ensured orphan public IPs on Azure deleted when service recreated with the same name. ([#70463](https://github.com/kubernetes/kubernetes/pull/70463), [@feiskyer](https://github.com/feiskyer)) +- Improved Azure instance metadata handling by adding caches. 
([#70353](https://github.com/kubernetes/kubernetes/pull/70353), [@feiskyer](https://github.com/feiskyer)) +- Corrected check for non-Azure managed nodes with the Azure cloud provider ([#70135](https://github.com/kubernetes/kubernetes/pull/70135), [@marc-sensenich](https://github.com/marc-sensenich)) +- Fixed azure disk attach/detach failed forever issue ([#71377](https://github.com/kubernetes/kubernetes/pull/71377), [@andyzhangx](https://github.com/andyzhangx)) +- DisksAreAttached --> getNodeDataDisks--> GetDataDisks --> getVirtualMachine --> vmCache.Get ([#71495](https://github.com/kubernetes/kubernetes/pull/71495), [@andyzhangx](https://github.com/andyzhangx)) + +### SIG CLI + +- `kubectl apply` can now change a deployment strategy from rollout to recreate without explicitly clearing the rollout-related fields ([#70436](https://github.com/kubernetes/kubernetes/pull/70436), [@liggitt](https://github.com/liggitt)) +- The `kubectl plugin list` command now displays discovered plugin paths in the same order as they are found in a user's PATH variable. 
([#70443](https://github.com/kubernetes/kubernetes/pull/70443), [@juanvallejo](https://github.com/juanvallejo)) +- `kubectl get` no longer exits before printing all of its results if an error is found ([#70311](https://github.com/kubernetes/kubernetes/pull/70311), [@juanvallejo](https://github.com/juanvallejo)) +- Fixed a runtime error occurring when sorting the output of `kubectl get` with empty results ([#70740](https://github.com/kubernetes/kubernetes/pull/70740), [@mfpierre](https://github.com/mfpierre)) +- kubectl: support multiple arguments for cordon/uncordon and drain ([#68655](https://github.com/kubernetes/kubernetes/pull/68655), [@goodluckbot](https://github.com/goodluckbot)) +- Fixed ability for admin/edit/view users to see controller revisions, needed for kubectl rollout commands ([#70699](https://github.com/kubernetes/kubernetes/pull/70699), [@liggitt](https://github.com/liggitt)) +- `kubectl rollout undo` now returns errors when attempting to rollback a deployment to a non-existent revision ([#70039](https://github.com/kubernetes/kubernetes/pull/70039), [@liggitt](https://github.com/liggitt)) +- kubectl run now generates apps/v1 deployments by default ([#71006](https://github.com/kubernetes/kubernetes/pull/71006), [@liggitt](https://github.com/liggitt)) +- The "kubectl cp" command now supports path shortcuts (../) in remote paths. ([#65189](https://github.com/kubernetes/kubernetes/pull/65189), [@juanvallejo](https://github.com/juanvallejo)) +- Fixed dry-run output in kubectl apply --prune ([#69344](https://github.com/kubernetes/kubernetes/pull/69344), [@zegl](https://github.com/zegl)) +- The kubectl wait command must handle when a watch returns an error vs closing by printing out the error and retrying the watch. 
([#69389](https://github.com/kubernetes/kubernetes/pull/69389), [@smarterclayton](https://github.com/smarterclayton)) +- kubectl: support multiple arguments for cordon/uncordon and drain ([#68655](https://github.com/kubernetes/kubernetes/pull/68655), [@goodluckbot](https://github.com/goodluckbot)) + +### SIG Cloud Provider + +- Added deprecation warning for all cloud providers ([#69171](https://github.com/kubernetes/kubernetes/pull/69171), [@andrewsykim](https://github.com/andrewsykim)) + +### SIG Cluster Lifecycle + +- kubeadm: Updates version of CoreDNS to 1.2.6 ([#70796](https://github.com/kubernetes/kubernetes/pull/70796), [@detiber](https://github.com/detiber)) +- kubeadm: Validate kubeconfig files in case of external CA mode. ([#70537](https://github.com/kubernetes/kubernetes/pull/70537), [@yagonobre](https://github.com/yagonobre)) +- kubeadm: The writable config file option for extra volumes is renamed to readOnly with a reversed meaning. With readOnly defaulted to false (as in pod specs). ([#70495](https://github.com/kubernetes/kubernetes/pull/70495), [@rosti](https://github.com/rosti)) +- kubeadm: Multiple API server endpoints support upon join is removed as it is now redundant. ([#69812](https://github.com/kubernetes/kubernetes/pull/69812), [@rosti](https://github.com/rosti)) +- `kubeadm reset` now cleans up custom etcd data path ([#70003](https://github.com/kubernetes/kubernetes/pull/70003), [@yagonobre](https://github.com/yagonobre)) +- kubeadm: Fixed unnecessary upgrades caused by undefined order of Volumes and VolumeMounts in manifests ([#70027](https://github.com/kubernetes/kubernetes/pull/70027), [@bart0sh](https://github.com/bart0sh)) +- kubeadm: Fixed node join taints. 
([#69846](https://github.com/kubernetes/kubernetes/pull/69846), [@andrewrynhard](https://github.com/andrewrynhard)) +- Fixed cluster autoscaler addon permissions so it can access batch/job. ([#69858](https://github.com/kubernetes/kubernetes/pull/69858), [@losipiuk](https://github.com/losipiuk)) +- kubeadm: JoinConfiguration now houses the discovery options in a nested Discovery structure, which in turn has a couple of other nested structures to house more specific options (BootstrapTokenDiscovery and FileDiscovery) ([#67763](https://github.com/kubernetes/kubernetes/pull/67763), [@rosti](https://github.com/rosti)) +- kubeadm: Fixed a possible scenario where kubeadm can pull much newer control-plane images ([#69301](https://github.com/kubernetes/kubernetes/pull/69301), [@neolit123](https://github.com/neolit123)) +- kubeadm now allows mixing of init/cluster and join configuration in a single YAML file (although a warning gets printed in this case). ([#69426](https://github.com/kubernetes/kubernetes/pull/69426), [@rosti](https://github.com/rosti)) +- kubeadm: Added a `v1beta1` API. 
([#69289](https://github.com/kubernetes/kubernetes/pull/69289), [@fabriziopandini](https://github.com/fabriziopandini)) +- kubeadm init correctly uses `--node-name` and `--cri-socket` when `--config` option is also used ([#71323](https://github.com/kubernetes/kubernetes/pull/71323), [@bart0sh](https://github.com/bart0sh)) +- kubeadm: Always pass spec.nodeName as `--hostname-override` for kube-proxy ([#71283](https://github.com/kubernetes/kubernetes/pull/71283), [@Klaven](https://github.com/Klaven)) +- `kubeadm join` correctly uses `--node-name` and `--cri-socket` when `--config` option is also used ([#71270](https://github.com/kubernetes/kubernetes/pull/71270), [@bart0sh](https://github.com/bart0sh)) +- kubeadm now supports the `--image-repository` flag for customizing what registry to pull images from ([#71135](https://github.com/kubernetes/kubernetes/pull/71135), [@luxas](https://github.com/luxas)) +- kubeadm: The writable config file option for extra volumes is renamed to readOnly with a reversed meaning. With readOnly defaulted to false (as in pod specs). ([#70495](https://github.com/kubernetes/kubernetes/pull/70495), [@rosti](https://github.com/rosti)) +- kubeadm: Multiple API server endpoints support upon join is removed as it is now redundant. ([#69812](https://github.com/kubernetes/kubernetes/pull/69812), [@rosti](https://github.com/rosti)) +- kubeadm: JoinConfiguration now houses the discovery options in a nested Discovery structure, which in turn has a couple of other nested structures to house more specific options (BootstrapTokenDiscovery and FileDiscovery) ([#67763](https://github.com/kubernetes/kubernetes/pull/67763), [@rosti](https://github.com/rosti)) +- kubeadm: Added a `v1beta1` API. 
([#69289](https://github.com/kubernetes/kubernetes/pull/69289), [@fabriziopandini](https://github.com/fabriziopandini)) +- kubeadm: Use `advertise-client-urls` instead of `listen-client-urls` as the `etcd-servers` option for apiserver. ([#69827](https://github.com/kubernetes/kubernetes/pull/69827), [@tomkukral](https://github.com/tomkukral)) +- Kubeadm now respects the custom image registry configuration across joins and upgrades. Kubeadm passes the custom registry to the kubelet for a custom pause container. ([#70603](https://github.com/kubernetes/kubernetes/pull/70603), [@chuckha](https://github.com/chuckha)) +- `kubeadm reset` now outputs instructions about manual iptables rules cleanup. ([#70874](https://github.com/kubernetes/kubernetes/pull/70874), [@rdodev](https://github.com/rdodev)) +- kubeadm: remove the AuditPolicyConfiguration feature gate ([#70807](https://github.com/kubernetes/kubernetes/pull/70807), [@Klaven](https://github.com/Klaven)) +- kubeadm pre-pulls Etcd image only if external Etcd is not used. ([#70743](https://github.com/kubernetes/kubernetes/pull/70743), [@bart0sh](https://github.com/bart0sh)) +- kubeadm: UnifiedControlPlaneImage is replaced by UseHyperKubeImage boolean value. ([#70793](https://github.com/kubernetes/kubernetes/pull/70793), [@rosti](https://github.com/rosti)) +- For kube-up and derived configurations, CoreDNS will honor master taints, for consistency with kube-dns behavior. 
([#70868](https://github.com/kubernetes/kubernetes/pull/70868), [@justinsb](https://github.com/justinsb)) +- Recognize newer docker versions without -ce/-ee suffix: 18.09.0 ([#71001](https://github.com/kubernetes/kubernetes/pull/71001), [@thomas-riccardi](https://github.com/thomas-riccardi)) +- Any external provider should be aware the cloud-provider interface should be imported from :- ([#68310](https://github.com/kubernetes/kubernetes/pull/68310), [@cheftako](https://github.com/cheftako)) +- Fixed 'kubeadm upgrade' infinite loop waiting for pod restart ([#69886](https://github.com/kubernetes/kubernetes/pull/69886), [@bart0sh](https://github.com/bart0sh)) +- Bumped addon-manager to v8.8 ([#69337](https://github.com/kubernetes/kubernetes/pull/69337), [@MrHohn](https://github.com/MrHohn)) +- GCE: Filter out spammy audit logs from cluster autoscaler. ([#70696](https://github.com/kubernetes/kubernetes/pull/70696), [@loburm](https://github.com/loburm)) +- GCE: Enable by default audit logging truncating backend. ([#68288](https://github.com/kubernetes/kubernetes/pull/68288), [@loburm](https://github.com/loburm)) +- Bumped cluster-proportional-autoscaler to 1.3.0 ([#69338](https://github.com/kubernetes/kubernetes/pull/69338), [@MrHohn](https://github.com/MrHohn)) +- Updated defaultbackend to v1.5 ([#69334](https://github.com/kubernetes/kubernetes/pull/69334), [@bowei](https://github.com/bowei)) + +### SIG GCP + +- Added tolerations for Stackdriver Logging and Metadata Agents. ([#69737](https://github.com/kubernetes/kubernetes/pull/69737), [@qingling128](https://github.com/qingling128)) +- Enabled insertId generation, and updated Stackdriver Logging Agent image to 0.5-1.5.36-1-k8s. This helps reduce log duplication and guarantee log order. 
([#68920](https://github.com/kubernetes/kubernetes/pull/68920), [@qingling128](https://github.com/qingling128)) +- Updated crictl to v1.12.0 ([#69033](https://github.com/kubernetes/kubernetes/pull/69033), [@feiskyer](https://github.com/feiskyer)) + +### SIG Network + +- Corrected family type (inet6) for ipsets in ipv6-only clusters ([#68436](https://github.com/kubernetes/kubernetes/pull/68436), [@uablrek](https://github.com/uablrek)) +- kube-proxy argument `hostname-override` can be used to override hostname defined in the configuration file ([#69340](https://github.com/kubernetes/kubernetes/pull/69340), [@stevesloka](https://github.com/stevesloka)) +- CoreDNS correctly implements DNS spec for Services with externalNames that look like IP addresses. Kube-dns does not follow the spec for the same case, resulting in a behavior change when moving from Kube-dns to CoreDNS. See: [coredns/coredns#2324](https://github.com/coredns/coredns/issues/2324) +- IPVS proxier now set net/ipv4/vs/conn_reuse_mode to 0 by default, which will highly improve IPVS proxier performance. ([#71114](https://github.com/kubernetes/kubernetes/pull/71114), [@Lion-Wei](https://github.com/Lion-Wei)) +- CoreDNS is now version 1.2.6 ([#70799](https://github.com/kubernetes/kubernetes/pull/70799), [@rajansandeep](https://github.com/rajansandeep)) +- Addon configuration is introduced in the kubeadm config API, while feature flag CoreDNS is now deprecated. 
([#70024](https://github.com/kubernetes/kubernetes/pull/70024), [@fabriziopandini](https://github.com/fabriziopandini)) + +### SIG Node + +- Fixed a bug in previous releases where a pod could be placed inside another pod's cgroup when specifying --cgroup-root ([#70678](https://github.com/kubernetes/kubernetes/pull/70678), [@dashpole](https://github.com/dashpole)) +- Optimized calculating stats when only CPU and Memory stats are returned from Kubelet stats/summary http endpoint. ([#68841](https://github.com/kubernetes/kubernetes/pull/68841), [@krzysztof-jastrzebski](https://github.com/krzysztof-jastrzebski)) +- kubelet now supports `log-file` option to write logs directly to a specific file ([#70917](https://github.com/kubernetes/kubernetes/pull/70917), [@dims](https://github.com/dims)) +- Do not detach volume if mount in progress ([#71145](https://github.com/kubernetes/kubernetes/pull/71145), [@gnufied](https://github.com/gnufied)) +- The runtimeHandler field on the RuntimeClass resource now accepts the empty string. 
([#69550](https://github.com/kubernetes/kubernetes/pull/69550), [@tallclair](https://github.com/tallclair)) +- kube-apiserver: fixes `procMount` field incorrectly being marked as required in openapi schema ([#69694](https://github.com/kubernetes/kubernetes/pull/69694), [@jessfraz](https://github.com/jessfraz)) + +### SIG OpenStack + +- Fixed cloud-controller-manager crash when using OpenStack provider and PersistentVolume initializing controller ([#70459](https://github.com/kubernetes/kubernetes/pull/70459), [@mvladev](https://github.com/mvladev)) + +### SIG Release + +- Use debian-base instead of busybox as base image for server images ([#70245](https://github.com/kubernetes/kubernetes/pull/70245), [@ixdy](https://github.com/ixdy)) +- Images for cloud-controller-manager, kube-apiserver, kube-controller-manager, and kube-scheduler now contain a minimal /etc/nsswitch.conf and should respect /etc/hosts for lookups ([#69238](https://github.com/kubernetes/kubernetes/pull/69238), [@BenTheElder](https://github.com/BenTheElder)) + +### SIG Scheduling + +- Added metrics for volume scheduling operations ([#59529](https://github.com/kubernetes/kubernetes/pull/59529), [@wackxu](https://github.com/wackxu)) +- Improved memory use and performance when processing large numbers of pods containing tolerations ([#65350](https://github.com/kubernetes/kubernetes/pull/65350), [@liggitt](https://github.com/liggitt)) +- Fixed a bug in the scheduler that could cause the scheduler to go to an infinite loop when all nodes in a zone are removed. ([#69758](https://github.com/kubernetes/kubernetes/pull/69758), [@bsalamat](https://github.com/bsalamat)) +- Clear pod binding cache on bind error to make sure stale pod binding cache will not be used. 
([#71212](https://github.com/kubernetes/kubernetes/pull/71212), [@cofyc](https://github.com/cofyc)) +- Fixed a scheduler panic due to internal cache inconsistency ([#71063](https://github.com/kubernetes/kubernetes/pull/71063), [@Huang-Wei](https://github.com/Huang-Wei)) +- Report kube-scheduler unhealthy if leader election is deadlocked. ([#71085](https://github.com/kubernetes/kubernetes/pull/71085), [@bsalamat](https://github.com/bsalamat)) +- Fixed a potential bug that scheduler preempts unnecessary pods. ([#70898](https://github.com/kubernetes/kubernetes/pull/70898), [@Huang-Wei](https://github.com/Huang-Wei)) + +### SIG Storage + +- Fixed CSI volume limits not showing up in node's capacity and allocatable ([#70540](https://github.com/kubernetes/kubernetes/pull/70540), [@gnufied](https://github.com/gnufied)) +- CSI drivers now have access to mountOptions defined on the storage class when attaching volumes. ([#67898](https://github.com/kubernetes/kubernetes/pull/67898), [@bswartz](https://github.com/bswartz)) +- change default azure file mount permission to 0777 ([#69854](https://github.com/kubernetes/kubernetes/pull/69854), [@andyzhangx](https://github.com/andyzhangx)) +- Fixed subpath in containerized kubelet. ([#69565](https://github.com/kubernetes/kubernetes/pull/69565), [@jsafrane](https://github.com/jsafrane)) +- Fixed panic on iSCSI volume tear down. ([#69140](https://github.com/kubernetes/kubernetes/pull/69140), [@jsafrane](https://github.com/jsafrane)) +- CSIPersistentVolume feature, i.e. PersistentVolumes with CSIPersistentVolumeSource, is GA. ([#69929](https://github.com/kubernetes/kubernetes/pull/69929), [@jsafrane](https://github.com/jsafrane)) +- Fixed CSIDriver API object to allow missing fields. 
([#69331](https://github.com/kubernetes/kubernetes/pull/69331), [@jsafrane](https://github.com/jsafrane)) +- Flex volume plugins now support expandvolume (to increase underlying volume capacity) and expanfs (resize filesystem) commands that Flex plugin authors can implement to support expanding in use Flex PersistentVolumes ([#67851](https://github.com/kubernetes/kubernetes/pull/67851), [@aniket-s-kulkarni](https://github.com/aniket-s-kulkarni)) +- Enabled AttachVolumeLimit feature ([#69225](https://github.com/kubernetes/kubernetes/pull/69225), [@gnufied](https://github.com/gnufied)) +- The default storage class annotation for the storage addons has been changed to use the GA variant ([#68345](https://github.com/kubernetes/kubernetes/pull/68345), [@smelchior](https://github.com/smelchior)) +- GlusterFS PersistentVolumes sources can now reference endpoints in any namespace using the `spec.glusterfs.endpointsNamespace` field. Ensure all kubelets are upgraded to 1.13+ before using this capability. ([#60195](https://github.com/kubernetes/kubernetes/pull/60195), [@humblec](https://github.com/humblec)) +- Fixed GetVolumeLimits log flushing issue ([#69558](https://github.com/kubernetes/kubernetes/pull/69558), [@andyzhangx](https://github.com/andyzhangx)) +- The `MountPropagation` feature is unconditionally enabled in v1.13, and can no longer be disabled. ([#68230](https://github.com/kubernetes/kubernetes/pull/68230), [@bertinatto](https://github.com/bertinatto)) + +### SIG Windows + +- `kubelet --system-reserved` and `--kube-reserved` are supported now on Windows nodes ([#69960](https://github.com/kubernetes/kubernetes/pull/69960), [@feiskyer](https://github.com/feiskyer)) +- Windows runtime endpoints is now switched to `npipe:////./pipe/dockershim` from `tcp://localhost:3735`. 
([#69516](https://github.com/kubernetes/kubernetes/pull/69516), [@feiskyer](https://github.com/feiskyer)) +- Fixed service issues with named targetPort for Windows ([#70076](https://github.com/kubernetes/kubernetes/pull/70076), [@feiskyer](https://github.com/feiskyer)) +- Handle Windows named pipes in host mounts. ([#69484](https://github.com/kubernetes/kubernetes/pull/69484), [@ddebroy](https://github.com/ddebroy)) +- Fixed inconsistency in windows kernel proxy when updating HNS policy. ([#68923](https://github.com/kubernetes/kubernetes/pull/68923), [@delulu](https://github.com/delulu)) + +## External Dependencies + +- Default etcd server is unchanged at v3.2.24 since Kubernetes 1.12. ([#68318](https://github.com/kubernetes/kubernetes/pull/68318)) +- The list of validated docker versions remain unchanged at 1.11.1, 1.12.1, 1.13.1, 17.03, 17.06, 17.09, 18.06 since Kubernetes 1.12. ([#68495](https://github.com/kubernetes/kubernetes/pull/68495)) +- The default Go version was updated to 1.11.2. ([#70665](https://github.com/kubernetes/kubernetes/pull/70665)) +- The minimum supported Go version was updated to 1.11.2 ([#69386](https://github.com/kubernetes/kubernetes/pull/69386)) +- CNI is unchanged at v0.6.0 since Kubernetes 1.10 ([#51250](https://github.com/kubernetes/kubernetes/pull/51250)) +- CSI is updated to 1.0.0. Pre-1.0.0 API support is now deprecated. ([#71020](https://github.com/kubernetes/kubernetes/pull/71020)]) +- The dashboard add-on has been updated to v1.10.0. 
([#68450](https://github.com/kubernetes/kubernetes/pull/68450)) +- Heapster remains at v1.6.0-beta, but is now retired in Kubernetes 1.13 ([#67074](https://github.com/kubernetes/kubernetes/pull/67074)) +- Cluster Autoscaler has been upgraded to v1.13.0 ([#71513](https://github.com/kubernetes/kubernetes/pull/71513)) +- kube-dns is unchanged at v1.14.13 since Kubernetes 1.12 ([#68900](https://github.com/kubernetes/kubernetes/pull/68900)) +- Influxdb is unchanged at v1.3.3 since Kubernetes 1.10 ([#53319](https://github.com/kubernetes/kubernetes/pull/53319)) +- Grafana is unchanged at v4.4.3 since Kubernetes 1.10 ([#53319](https://github.com/kubernetes/kubernetes/pull/53319)) +- Kibana has been upgraded to v6.3.2. ([#67582](https://github.com/kubernetes/kubernetes/pull/67582)) +- CAdvisor has been updated to v0.32.0 ([#70964](https://github.com/kubernetes/kubernetes/pull/70964)) +- fluentd-gcp-scaler has been updated to v0.5.0 ([#68837](https://github.com/kubernetes/kubernetes/pull/68837)) +- Fluentd in fluentd-elasticsearch is unchanged at v1.2.4 since Kubernetes 1.11 ([#67434](https://github.com/kubernetes/kubernetes/pull/67434)) +- fluentd-elasticsearch has been updated to v2.2.1 ([#68012](https://github.com/kubernetes/kubernetes/pull/68012)) +- The fluent-plugin-kubernetes_metadata_filter plugin in fluentd-elasticsearch is unchanged at 2.0.0 since Kubernetes 1.12 ([#67544](https://github.com/kubernetes/kubernetes/pull/67544)) +- fluentd-gcp has been updated to v3.2.0 ([#70954](https://github.com/kubernetes/kubernetes/pull/70954)) +- OIDC authentication is unchanged at coreos/go-oidc v2 since Kubernetes 1.10 ([#58544](https://github.com/kubernetes/kubernetes/pull/58544)) +- Calico was updated to v3.3.1 ([#70932](https://github.com/kubernetes/kubernetes/pull/70932)) +- Upgraded crictl on GCE to v1.12.0 
([#69033](https://github.com/kubernetes/kubernetes/pull/69033)) +- CoreDNS has been updated to v1.2.6 ([#70799](https://github.com/kubernetes/kubernetes/pull/70799)) +- event-exporter has been updated to v0.2.3 ([#67691](https://github.com/kubernetes/kubernetes/pull/67691)) +- Es-image remains unchanged at Elasticsearch 6.3.2 since Kubernetes 1.12 ([#67484](https://github.com/kubernetes/kubernetes/pull/67484)) +- metrics-server remains unchanged at v0.3.1 since Kubernetes 1.12 ([#68746](https://github.com/kubernetes/kubernetes/pull/68746)) +- GLBC remains unchanged at v1.2.3 since Kubernetes 1.12 ([#66793](https://github.com/kubernetes/kubernetes/pull/66793)) +- Ingress-gce remains unchanged at v1.2.3 since Kubernetes 1.12 ([#66793](https://github.com/kubernetes/kubernetes/pull/66793)) +- ip-masq-agen remains unchanged at v2.1.1 since Kubernetes 1.12 ([#67916](https://github.com/kubernetes/kubernetes/pull/67916)) + +# v1.13.0-rc.2 + +[Documentation](https://docs.k8s.io) + +## Downloads for v1.13.0-rc.2 + + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes.tar.gz) | `12fbaf943ae72711cd93c9955719ec1773a229dbb8f86a44fcda179229beb82add4dc1a54ceb50b9f48fde48e2464ed0cd4b2e57d9689a7ae784cb052beb6751` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-src.tar.gz) | `8e94f0fe73909610e85c201bb1ba4f66fd55ca2b4ded77217a4dfad2874d402cc1cc94203ecc195f909126c186701e5e1e62890ad288895493a1759f88a190d0` + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-client-darwin-386.tar.gz) | `ac555f5d1e6b88fa4de1e06e0a1ebd372582f97c526c938334a8c63fbf17545607efbba9975d1767e147113e551e986d6523f6985ea41236cfbf7949df31f016` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-client-darwin-amd64.tar.gz) | 
`2eae428a0e4bcb2237343d7ac1e431ccfc1f7037622bb3131ad8d48a3af6f5ed34be899ec1ec32af7eb7d411cb0cda02a2413405479722ab868cdc816726c9df` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-client-linux-386.tar.gz) | `89e671679b4516f184f7fd5ea0fe2a9ab0245fab34447625786bf55841223124527d3aa2ee6fa2474333f37eea4e9a5ba6f3f4dc3698907fd24bedf522f53b40` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-client-linux-amd64.tar.gz) | `61f6513722e9c485300b822d6fc5998927bbffa18862d2d3f177a7c7cc0ee56c51ec169e3c8239e352c022094bb02124ed060d7d5c3cec9b67aae20ffd42f387` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-client-linux-arm.tar.gz) | `ef0e5fd4bf2074dfd3cf54d45307550273695906baca3533a9d23424e7b693d706f6d1d3a09a34e2d1f84d9eddc6b62d96e5190b8c7145919e93f0ae75ec4d06` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-client-linux-arm64.tar.gz) | `d34bb9ce9bfe2a5375fd58920e63b4eef818348719dba460f35838433af57a1a23fa659e53de52c8174fa212c94c4196ac5a02ce02ef714860488c77563b5821` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-client-linux-ppc64le.tar.gz) | `4dc4e4a5e166e63360ba86e1278bbe75212ac7c3f60ba30425a1c5654bf5a9b1164543fdc23d7dfd9d3aea7be38544c8dc535459e96c062db631e58c5c628762` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-client-linux-s390x.tar.gz) | `d27675f4753469cd5e31faed13a1ea9654c25d38b0d96c1340215fd231050ffc66dc40c5103f8377339bacf00f1c99d386fe9c21fc68c5a21c10667f773d9d4b` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-client-windows-386.tar.gz) | `9d6e6de2d4a55eaeebd7fa6b861548e0768381d50838430722b56636428a3417b8f2bbc953bc365294a857d8f5b51d90807e5eafe874f37d9b726f48b5d04197` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-client-windows-amd64.tar.gz) | 
`30b2da5c015ef88b9efcf90bffe0498d367df7c126b65f2e878af263c5d62b8c93792dbf20511d0ff034c7a9e2c3fc93931860e1254ed158eddec34f407b9005` + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-server-linux-amd64.tar.gz) | `8180f2b788249fe65f7f1d3ee431ac758ede29a6349db312afbee080ff2c24586fc468f11a9cbcb8d22842739974f29e10793778f5fd5c55d10129e97a1efce3` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-server-linux-arm.tar.gz) | `e9165284a0b82a9ab88dad05f43bfe1bebecad3bb1c7118475c3426e0b6f9f91d340e1e6223d81df9337ab4cc9a96708443c025030127acf88437f0c327b750b` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-server-linux-arm64.tar.gz) | `03797c021ebed3b08835e72eed405c57aaacce972bbbbf88bf49310efbf8c7242f2f223d73b5d2ed4c21e5196e6e5fb7b2b811f08607db6dbe98f869bf28bedb` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-server-linux-ppc64le.tar.gz) | `ceb49af22e3b518f3ba27c1e7de28e577e2735175e84a6d203f1f8766eceaa7c0424746ff71498d7847e98f538af5663b16cc306cb0adbb006d5d869766dfb9b` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-server-linux-s390x.tar.gz) | `bee4752e8a52e217ae1ffcfbc263453c724de684b4d463d5ddb24a3a30a67fc8f78e6c0a8154c6b6581d17f1e168903bc18d0e56f02fce5933f673bb4c74a8cf` + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-node-linux-amd64.tar.gz) | `b368989bbb8ab4d29b51d5d4d71d073b0ceb39614c944859dcd14c3303c31475850f7012deaa8d5ba9c17edd728bce536fbd523ae7defc74a30f0878f05497bf` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-node-linux-arm.tar.gz) | `404b7b74a1e0d0fed9088a7e9461e02cfd9a6992c554baa125b7a361a6baa03d1e4622fbc4ec51836f00a7ac4f90167f345307678527f5781e06acdf526b9a45` 
+[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-node-linux-arm64.tar.gz) | `fa531b1675a778c572a2175fb1bed00e78dc589f638f2096b3b5c9d3d691a5668787a43d69898678abd70c7b949e05cfebfb0783c0144a66bdff61fed6094582` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-node-linux-ppc64le.tar.gz) | `a7ecc1f63e632c1b4f9b312babd6882ec966420bf4f8346edf80495fcf860d912729072c79d23cc071a07239783409b02c1f4a716a24e2597f2b490c9b3bb5b3` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-node-linux-s390x.tar.gz) | `a7171ed95de943a0ac5a32da4458e8d4366eb1fadbe426bebc371d2bb6536636b14db9d2cd03952258b3cb1b99fdca2db07947b028cc6c7bb92f4281ba6f62f2` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.13.0-rc.2/kubernetes-node-windows-amd64.tar.gz) | `8a3a71d142b99fb200c4c1c9c0fa4dc6a3b64a0b506dc37dc3d832a94a791619a09ae4b2c6f73802f6833234570633974547f7700c8bb6de71d91ba2c4ac4b54` + +## Changelog since v1.13.0-rc.1 + +### Other notable changes + +* Update Cluster Autoscaler version to 1.13.0. Release notes: https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.13.0 ([#71513](https://github.com/kubernetes/kubernetes/pull/71513), [@losipiuk](https://github.com/losipiuk)) +* fix detach azure disk issue due to dirty cache ([#71495](https://github.com/kubernetes/kubernetes/pull/71495), [@andyzhangx](https://github.com/andyzhangx)) + + + # v1.13.0-rc.1 [Documentation](https://docs.k8s.io) @@ -100,9 +885,8 @@ filename | sha512 hash ### Other notable changes +* CVE-2018-1002105: Fix critical security issue in kube-apiserver upgrade request proxy handler ([#71411](https://github.com/kubernetes/kubernetes/issues/71411), [@liggitt](https://github.com/liggitt)) * Update Cluster Autoscaler version to 1.13.0-rc.2. 
Release notes: https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.13.0-rc.2 ([#71452](https://github.com/kubernetes/kubernetes/pull/71452), [@losipiuk](https://github.com/losipiuk)) -* Fixes an issue with stuck connections handling error responses ([#71419](https://github.com/kubernetes/kubernetes/pull/71419), [@liggitt](https://github.com/liggitt)) -* Fixes an issue with stuck connections handling error responses ([#71412](https://github.com/kubernetes/kubernetes/pull/71412), [@liggitt](https://github.com/liggitt)) * Upgrade Stackdriver Logging Agent addon image to 0.6-1.6.0-1 to use Fluentd v1.2. This provides nanoseconds timestamp granularity for logs. ([#70954](https://github.com/kubernetes/kubernetes/pull/70954), [@qingling128](https://github.com/qingling128)) * fixes a runtime error occuring when sorting the output of `kubectl get` with empty results ([#70740](https://github.com/kubernetes/kubernetes/pull/70740), [@mfpierre](https://github.com/mfpierre)) * fix azure disk attach/detach failed forever issue ([#71377](https://github.com/kubernetes/kubernetes/pull/71377), [@andyzhangx](https://github.com/andyzhangx)) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/BUILD index fd5e6391c5..cdd40d9a99 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/BUILD @@ -165,6 +165,7 @@ go_test( "kubelet_pods_windows_test.go", "kubelet_resources_test.go", "kubelet_test.go", + "kubelet_volumes_linux_test.go", "kubelet_volumes_test.go", "main_test.go", "oom_watcher_test.go", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_getters.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_getters.go index eb05a578c0..a577a6bfad 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_getters.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_getters.go @@ 
-25,7 +25,7 @@ import ( cadvisorapiv1 "github.com/google/cadvisor/info/v1" "k8s.io/klog" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/kubelet/cm" "k8s.io/kubernetes/pkg/kubelet/config" @@ -99,6 +99,13 @@ func (kl *Kubelet) getPodDir(podUID types.UID) string { return filepath.Join(kl.getPodsDir(), string(podUID)) } +// getPodVolumesSubpathsDir returns the full path to the per-pod subpaths directory under +// which subpath volumes are created for the specified pod. This directory may not +// exist if the pod does not exist or subpaths are not specified. +func (kl *Kubelet) getPodVolumeSubpathsDir(podUID types.UID) string { + return filepath.Join(kl.getPodDir(podUID), config.DefaultKubeletVolumeSubpathsDirName) +} + // getPodVolumesDir returns the full path to the per-pod data directory under // which volumes are created for the specified pod. This directory may not // exist if the pod does not exist. @@ -315,6 +322,19 @@ func (kl *Kubelet) getMountedVolumePathListFromDisk(podUID types.UID) ([]string, return mountedVolumes, nil } +// podVolumesSubpathsDirExists returns true if the pod volume-subpaths directory for +// a given pod exists +func (kl *Kubelet) podVolumeSubpathsDirExists(podUID types.UID) (bool, error) { + podVolDir := kl.getPodVolumeSubpathsDir(podUID) + + if pathExists, pathErr := volumeutil.PathExists(podVolDir); pathErr != nil { + return true, fmt.Errorf("Error checking if path %q exists: %v", podVolDir, pathErr) + } else if !pathExists { + return false, nil + } + return true, nil +} + // GetVersionInfo returns information about the version of cAdvisor in use. 
func (kl *Kubelet) GetVersionInfo() (*cadvisorapiv1.VersionInfo, error) { return kl.cadvisor.VersionInfo() diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes.go b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes.go index 7681ee6529..88b34c5efd 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/kubelet_volumes.go @@ -19,7 +19,7 @@ package kubelet import ( "fmt" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" @@ -114,6 +114,8 @@ func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*v1.Pod, runningPods []*kubecon } // If volumes have not been unmounted/detached, do not delete directory. // Doing so may result in corruption of data. + // TODO: getMountedVolumePathListFromDisk() call may be redundant with + // kl.getPodVolumePathListFromDisk(). Can this be cleaned up? if podVolumesExist := kl.podVolumesExist(uid); podVolumesExist { klog.V(3).Infof("Orphaned pod %q found, but volumes are not cleaned up", uid) continue @@ -128,6 +130,18 @@ func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*v1.Pod, runningPods []*kubecon orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("Orphaned pod %q found, but volume paths are still present on disk", uid)) continue } + + // If there are any volume-subpaths, do not cleanup directories + volumeSubpathExists, err := kl.podVolumeSubpathsDirExists(uid) + if err != nil { + orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("Orphaned pod %q found, but error %v occurred during reading of volume-subpaths dir from disk", uid, err)) + continue + } + if volumeSubpathExists { + orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("Orphaned pod %q found, but volume subpaths are still present on disk", uid)) + continue + } + klog.V(3).Infof("Orphaned pod %q found, removing", uid) if err := removeall.RemoveAllOneFilesystem(kl.mounter, 
kl.getPodDir(uid)); err != nil { klog.Errorf("Failed to remove orphaned pod %q dir; err: %v", uid, err) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/util/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/util/BUILD index 4165864e0a..7b59002486 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/util/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/util/BUILD @@ -34,6 +34,8 @@ go_test( go_library( name = "go_default_library", srcs = [ + "boottime_util_darwin.go", + "boottime_util_linux.go", "doc.go", "util.go", "util_unix.go", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/util/boottime_util_darwin.go b/vendor/k8s.io/kubernetes/pkg/kubelet/util/boottime_util_darwin.go new file mode 100644 index 0000000000..09d3b8865d --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/util/boottime_util_darwin.go @@ -0,0 +1,44 @@ +// +build darwin + +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/unix" +) + +// GetBootTime returns the time at which the machine was started, truncated to the nearest second +func GetBootTime() (time.Time, error) { + output, err := unix.SysctlRaw("kern.boottime") + if err != nil { + return time.Time{}, err + } + var timeval syscall.Timeval + if len(output) != int(unsafe.Sizeof(timeval)) { + return time.Time{}, fmt.Errorf("unexpected output when calling syscall kern.bootime. 
Expected len(output) to be %v, but got %v", + int(unsafe.Sizeof(timeval)), len(output)) + } + timeval = *(*syscall.Timeval)(unsafe.Pointer(&output[0])) + sec, nsec := timeval.Unix() + return time.Unix(sec, nsec).Truncate(time.Second), nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/util/boottime_util_linux.go b/vendor/k8s.io/kubernetes/pkg/kubelet/util/boottime_util_linux.go new file mode 100644 index 0000000000..f00e7c06bf --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/util/boottime_util_linux.go @@ -0,0 +1,36 @@ +// +build freebsd linux + +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "fmt" + "time" + + "golang.org/x/sys/unix" +) + +// GetBootTime returns the time at which the machine was started, truncated to the nearest second +func GetBootTime() (time.Time, error) { + currentTime := time.Now() + var info unix.Sysinfo_t + if err := unix.Sysinfo(&info); err != nil { + return time.Time{}, fmt.Errorf("error getting system uptime: %s", err) + } + return currentTime.Add(-time.Duration(info.Uptime) * time.Second).Truncate(time.Second), nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unsupported.go b/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unsupported.go index 6661678ace..68a2bdf01b 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unsupported.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_unsupported.go @@ -45,3 +45,8 @@ func UnlockPath(fileHandles []uintptr) { func LocalEndpoint(path, file string) string { return "" } + +// GetBootTime empty implementation +func GetBootTime() (time.Time, error) { + return time.Time{}, fmt.Errorf("GetBootTime is unsupported in this build") +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_windows.go b/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_windows.go index 7123728ff9..92accc55e1 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_windows.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/util/util_windows.go @@ -23,6 +23,7 @@ import ( "net" "net/url" "strings" + "syscall" "time" "github.com/Microsoft/go-winio" @@ -112,3 +113,15 @@ func LocalEndpoint(path, file string) string { } return u.String() + "//./pipe/" + file } + +var tickCount = syscall.NewLazyDLL("kernel32.dll").NewProc("GetTickCount64") + +// GetBootTime returns the time at which the machine was started, truncated to the nearest second +func GetBootTime() (time.Time, error) { + currentTime := time.Now() + output, _, err := tickCount.Call() + if errno, ok := err.(syscall.Errno); !ok || errno != 0 { + return time.Time{}, err + } + return 
currentTime.Add(-time.Duration(output) * time.Millisecond).Truncate(time.Second), nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/BUILD b/vendor/k8s.io/kubernetes/pkg/util/mount/BUILD index 221afb7a9c..c9b9ce8a47 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/BUILD @@ -9,6 +9,7 @@ go_library( "exec_mount_unsupported.go", "fake.go", "mount.go", + "mount_helper.go", "mount_linux.go", "mount_unsupported.go", "mount_windows.go", @@ -67,6 +68,7 @@ go_test( name = "go_default_test", srcs = [ "exec_mount_test.go", + "mount_helper_test.go", "mount_linux_test.go", "mount_test.go", "mount_windows_test.go", diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/fake.go b/vendor/k8s.io/kubernetes/pkg/util/mount/fake.go index 06e0fcccdc..0e2952f3e0 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/fake.go +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/fake.go @@ -30,6 +30,8 @@ type FakeMounter struct { MountPoints []MountPoint Log []FakeAction Filesystem map[string]FileType + // Error to return for a path when calling IsLikelyNotMountPoint + MountCheckErrors map[string]error // Some tests run things in parallel, make sure the mounter does not produce // any golang's DATA RACE warnings. 
mutex sync.Mutex @@ -119,6 +121,7 @@ func (f *FakeMounter) Unmount(target string) error { } f.MountPoints = newMountpoints f.Log = append(f.Log, FakeAction{Action: FakeActionUnmount, Target: absTarget}) + delete(f.MountCheckErrors, target) return nil } @@ -141,7 +144,12 @@ func (f *FakeMounter) IsLikelyNotMountPoint(file string) (bool, error) { f.mutex.Lock() defer f.mutex.Unlock() - _, err := os.Stat(file) + err := f.MountCheckErrors[file] + if err != nil { + return false, err + } + + _, err = os.Stat(file) if err != nil { return true, err } diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/mount_helper.go b/vendor/k8s.io/kubernetes/pkg/util/mount/mount_helper.go new file mode 100644 index 0000000000..a06871e478 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/mount_helper.go @@ -0,0 +1,124 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mount + +import ( + "fmt" + "os" + "syscall" + + "k8s.io/klog" +) + +// CleanupMountPoint unmounts the given path and +// deletes the remaining directory if successful. +// if extensiveMountPointCheck is true +// IsNotMountPoint will be called instead of IsLikelyNotMountPoint. +// IsNotMountPoint is more expensive but properly handles bind mounts within the same fs. 
+func CleanupMountPoint(mountPath string, mounter Interface, extensiveMountPointCheck bool) error { + // mounter.ExistsPath cannot be used because for containerized kubelet, we need to check + // the path in the kubelet container, not on the host. + pathExists, pathErr := PathExists(mountPath) + if !pathExists { + klog.Warningf("Warning: Unmount skipped because path does not exist: %v", mountPath) + return nil + } + corruptedMnt := IsCorruptedMnt(pathErr) + if pathErr != nil && !corruptedMnt { + return fmt.Errorf("Error checking path: %v", pathErr) + } + return doCleanupMountPoint(mountPath, mounter, extensiveMountPointCheck, corruptedMnt) +} + +// doCleanupMountPoint unmounts the given path and +// deletes the remaining directory if successful. +// if extensiveMountPointCheck is true +// IsNotMountPoint will be called instead of IsLikelyNotMountPoint. +// IsNotMountPoint is more expensive but properly handles bind mounts within the same fs. +// if corruptedMnt is true, it means that the mountPath is a corrupted mountpoint, and the mount point check +// will be skipped +func doCleanupMountPoint(mountPath string, mounter Interface, extensiveMountPointCheck bool, corruptedMnt bool) error { + if !corruptedMnt { + var notMnt bool + var err error + if extensiveMountPointCheck { + notMnt, err = IsNotMountPoint(mounter, mountPath) + } else { + notMnt, err = mounter.IsLikelyNotMountPoint(mountPath) + } + + if err != nil { + return err + } + + if notMnt { + klog.Warningf("Warning: %q is not a mountpoint, deleting", mountPath) + return os.Remove(mountPath) + } + } + + // Unmount the mount path + klog.V(4).Infof("%q is a mountpoint, unmounting", mountPath) + if err := mounter.Unmount(mountPath); err != nil { + return err + } + + notMnt, mntErr := mounter.IsLikelyNotMountPoint(mountPath) + if mntErr != nil { + return mntErr + } + if notMnt { + klog.V(4).Infof("%q is unmounted, deleting the directory", mountPath) + return os.Remove(mountPath) + } + return fmt.Errorf("Failed to 
unmount path %v", mountPath) +} + +// TODO: clean this up to use pkg/util/file/FileExists +// PathExists returns true if the specified path exists. +func PathExists(path string) (bool, error) { + _, err := os.Stat(path) + if err == nil { + return true, nil + } else if os.IsNotExist(err) { + return false, nil + } else if IsCorruptedMnt(err) { + return true, err + } else { + return false, err + } +} + +// IsCorruptedMnt return true if err is about corrupted mount point +func IsCorruptedMnt(err error) bool { + if err == nil { + return false + } + var underlyingError error + switch pe := err.(type) { + case nil: + return false + case *os.PathError: + underlyingError = pe.Err + case *os.LinkError: + underlyingError = pe.Err + case *os.SyscallError: + underlyingError = pe.Err + } + + return underlyingError == syscall.ENOTCONN || underlyingError == syscall.ESTALE || underlyingError == syscall.EIO +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go b/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go index 6ebeff053b..85a9016968 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go @@ -55,6 +55,7 @@ const ( fsckErrorsUncorrected = 4 // place for subpath mounts + // TODO: pass in directory using kubelet_getters instead containerSubPathDirectoryName = "volume-subpaths" // syscall.Openat flags used to traverse directories not following symlinks nofollowFlags = unix.O_RDONLY | unix.O_NOFOLLOW @@ -890,15 +891,22 @@ func doCleanSubPaths(mounter Interface, podDir string, volumeName string) error // scan /var/lib/kubelet/pods//volume-subpaths///* fullContainerDirPath := filepath.Join(subPathDir, containerDir.Name()) - subPaths, err := ioutil.ReadDir(fullContainerDirPath) - if err != nil { - return fmt.Errorf("error reading %s: %s", fullContainerDirPath, err) - } - for _, subPath := range subPaths { - if err = doCleanSubPath(mounter, fullContainerDirPath, subPath.Name()); err != nil { 
+ err = filepath.Walk(fullContainerDirPath, func(path string, info os.FileInfo, err error) error { + if path == fullContainerDirPath { + // Skip top level directory + return nil + } + + // pass through errors and let doCleanSubPath handle them + if err = doCleanSubPath(mounter, fullContainerDirPath, filepath.Base(path)); err != nil { return err } + return nil + }) + if err != nil { + return fmt.Errorf("error processing %s: %s", fullContainerDirPath, err) } + // Whole container has been processed, remove its directory. if err := os.Remove(fullContainerDirPath); err != nil { return fmt.Errorf("error deleting %s: %s", fullContainerDirPath, err) @@ -925,22 +933,12 @@ func doCleanSubPath(mounter Interface, fullContainerDirPath, subPathIndex string // process /var/lib/kubelet/pods//volume-subpaths/// klog.V(4).Infof("Cleaning up subpath mounts for subpath %v", subPathIndex) fullSubPath := filepath.Join(fullContainerDirPath, subPathIndex) - notMnt, err := IsNotMountPoint(mounter, fullSubPath) - if err != nil { - return fmt.Errorf("error checking %s for mount: %s", fullSubPath, err) - } - // Unmount it - if !notMnt { - if err = mounter.Unmount(fullSubPath); err != nil { - return fmt.Errorf("error unmounting %s: %s", fullSubPath, err) - } - klog.V(5).Infof("Unmounted %s", fullSubPath) - } - // Remove it *non*-recursively, just in case there were some hiccups. 
- if err = os.Remove(fullSubPath); err != nil { - return fmt.Errorf("error deleting %s: %s", fullSubPath, err) + + if err := CleanupMountPoint(fullSubPath, mounter, true); err != nil { + return fmt.Errorf("error cleaning subpath mount %s: %s", fullSubPath, err) } - klog.V(5).Infof("Removed %s", fullSubPath) + + klog.V(4).Infof("Successfully cleaned subpath directory %s", fullSubPath) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD b/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD index 64eac778de..8ccd019eb0 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/BUILD @@ -63,7 +63,6 @@ go_test( "//pkg/apis/core/v1/helper:go_default_library", "//pkg/features:go_default_library", "//pkg/kubelet/apis:go_default_library", - "//pkg/util/mount:go_default_library", "//pkg/util/slice:go_default_library", "//pkg/volume:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -73,8 +72,12 @@ go_test( "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature/testing:go_default_library", - "//staging/src/k8s.io/client-go/util/testing:go_default_library", - ], + ] + select({ + "@io_bazel_rules_go//go/platform:linux": [ + "//staging/src/k8s.io/client-go/util/testing:go_default_library", + ], + "//conditions:default": [], + }), ) filegroup( diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/util.go b/vendor/k8s.io/kubernetes/pkg/volume/util/util.go index 070961c282..18e24d69f0 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/util.go @@ -23,9 +23,8 @@ import ( "path" "path/filepath" "strings" - "syscall" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -128,8 +127,9 @@ func 
SetReady(dir string) { // UnmountPath is a common unmount routine that unmounts the given path and // deletes the remaining directory if successful. +// TODO: Remove this function and change callers to call mount pkg directly func UnmountPath(mountPath string, mounter mount.Interface) error { - return UnmountMountPoint(mountPath, mounter, false /* extensiveMountPointCheck */) + return mount.CleanupMountPoint(mountPath, mounter, false /* extensiveMountPointCheck */) } // UnmountMountPoint is a common unmount routine that unmounts the given path and @@ -137,93 +137,21 @@ func UnmountPath(mountPath string, mounter mount.Interface) error { // if extensiveMountPointCheck is true // IsNotMountPoint will be called instead of IsLikelyNotMountPoint. // IsNotMountPoint is more expensive but properly handles bind mounts. +// TODO: Change callers to call mount pkg directly func UnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMountPointCheck bool) error { - pathExists, pathErr := PathExists(mountPath) - if !pathExists { - klog.Warningf("Warning: Unmount skipped because path does not exist: %v", mountPath) - return nil - } - corruptedMnt := IsCorruptedMnt(pathErr) - if pathErr != nil && !corruptedMnt { - return fmt.Errorf("Error checking path: %v", pathErr) - } - return doUnmountMountPoint(mountPath, mounter, extensiveMountPointCheck, corruptedMnt) -} - -// doUnmountMountPoint is a common unmount routine that unmounts the given path and -// deletes the remaining directory if successful. -// if extensiveMountPointCheck is true -// IsNotMountPoint will be called instead of IsLikelyNotMountPoint. -// IsNotMountPoint is more expensive but properly handles bind mounts. 
-// if corruptedMnt is true, it means that the mountPath is a corrupted mountpoint, Take it as an argument for convenience of testing -func doUnmountMountPoint(mountPath string, mounter mount.Interface, extensiveMountPointCheck bool, corruptedMnt bool) error { - if !corruptedMnt { - var notMnt bool - var err error - if extensiveMountPointCheck { - notMnt, err = mount.IsNotMountPoint(mounter, mountPath) - } else { - notMnt, err = mounter.IsLikelyNotMountPoint(mountPath) - } - - if err != nil { - return err - } - - if notMnt { - klog.Warningf("Warning: %q is not a mountpoint, deleting", mountPath) - return os.Remove(mountPath) - } - } - - // Unmount the mount path - klog.V(4).Infof("%q is a mountpoint, unmounting", mountPath) - if err := mounter.Unmount(mountPath); err != nil { - return err - } - notMnt, mntErr := mounter.IsLikelyNotMountPoint(mountPath) - if mntErr != nil { - return mntErr - } - if notMnt { - klog.V(4).Infof("%q is unmounted, deleting the directory", mountPath) - return os.Remove(mountPath) - } - return fmt.Errorf("Failed to unmount path %v", mountPath) + return mount.CleanupMountPoint(mountPath, mounter, extensiveMountPointCheck) } // PathExists returns true if the specified path exists. 
+// TODO: Change callers to call mount pkg directly func PathExists(path string) (bool, error) { - _, err := os.Stat(path) - if err == nil { - return true, nil - } else if os.IsNotExist(err) { - return false, nil - } else if IsCorruptedMnt(err) { - return true, err - } else { - return false, err - } + return mount.PathExists(path) } // IsCorruptedMnt return true if err is about corrupted mount point +// TODO: Change callers to call mount pkg directly func IsCorruptedMnt(err error) bool { - if err == nil { - return false - } - var underlyingError error - switch pe := err.(type) { - case nil: - return false - case *os.PathError: - underlyingError = pe.Err - case *os.LinkError: - underlyingError = pe.Err - case *os.SyscallError: - underlyingError = pe.Err - } - - return underlyingError == syscall.ENOTCONN || underlyingError == syscall.ESTALE || underlyingError == syscall.EIO + return mount.IsCorruptedMnt(err) } // GetSecretForPod locates secret by name in the pod's namespace and returns secret map @@ -825,9 +753,10 @@ func GetUniqueVolumeName(pluginName, volumeName string) v1.UniqueVolumeName { return v1.UniqueVolumeName(fmt.Sprintf("%s/%s", pluginName, volumeName)) } -// GetUniqueVolumeNameForNonAttachableVolume returns the unique volume name -// for a non-attachable volume. -func GetUniqueVolumeNameForNonAttachableVolume( +// GetUniqueVolumeNameFromSpecWithPod returns a unique volume name with pod +// name included. This is useful to generate different names for different pods +// on same volume. +func GetUniqueVolumeNameFromSpecWithPod( podName types.UniquePodName, volumePlugin volume.VolumePlugin, volumeSpec *volume.Spec) v1.UniqueVolumeName { return v1.UniqueVolumeName( fmt.Sprintf("%s/%v-%s", volumePlugin.GetPluginName(), podName, volumeSpec.Name()))