diff --git a/CODEOWNERS b/CODEOWNERS index 3607f0a02bae..f3fff86f24ee 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -31,9 +31,9 @@ extensions/filters/common/original_src @snowp @klarose # sni_cluster extension /*/extensions/filters/network/sni_cluster @rshriram @lizan # tracers.datadog extension -/*/extensions/tracers/datadog @cgilmour @palazzem +/*/extensions/tracers/datadog @cgilmour @palazzem @mattklein123 # tracers.xray extension -/*/extensions/tracers/xray @marcomagdy @lavignes +/*/extensions/tracers/xray @marcomagdy @lavignes @mattklein123 # mysql_proxy extension /*/extensions/filters/network/mysql_proxy @rshriram @venilnoronha @mattklein123 # quic extension @@ -57,9 +57,9 @@ extensions/filters/common/original_src @snowp @klarose # http inspector /*/extensions/filters/listener/http_inspector @yxue @PiotrSikora @lizan # attribute context -/*/extensions/filters/common/expr @kyessenov @yangminzhu +/*/extensions/filters/common/expr @kyessenov @yangminzhu @lizan # webassembly common extension -/*/extensions/common/wasm @jplevyak @PiotrSikora +/*/extensions/common/wasm @jplevyak @PiotrSikora @lizan # common crypto extension /*/extensions/common/crypto @lizan @PiotrSikora @bdecoste /*/extensions/filters/http/grpc_http1_bridge @snowp @jose diff --git a/api/API_VERSIONING.md b/api/API_VERSIONING.md index 71f28eb84581..b31d9f723215 100644 --- a/api/API_VERSIONING.md +++ b/api/API_VERSIONING.md @@ -141,8 +141,7 @@ guided by annotations in protobuf. field or enum value. No field may be marked as deprecated unless a replacement for this functionality exists and the corresponding Envoy implementation is production ready. -* Renames are specified with a `[#rename-at-next-major-version: ]` protobuf comment - annotation. +* Renames are specified with a `[(udpa.annotations.field_migrate).rename = ""]` annotation. * We anticipate that `protoxform` will also support `oneof` promotion, package movement, etc. via similar annotations. 
diff --git a/api/bazel/repository_locations.bzl b/api/bazel/repository_locations.bzl index 928b1f2ff589..cddd46c3faae 100644 --- a/api/bazel/repository_locations.bzl +++ b/api/bazel/repository_locations.bzl @@ -1,5 +1,5 @@ -BAZEL_SKYLIB_RELEASE = "0.8.0" -BAZEL_SKYLIB_SHA256 = "2ef429f5d7ce7111263289644d233707dba35e39696377ebab8b0bc701f7818e" +BAZEL_SKYLIB_RELEASE = "0.9.0" +BAZEL_SKYLIB_SHA256 = "1dde365491125a3db70731e25658dfdd3bc5dbdfd11b840b3e987ecf043c7ca0" OPENCENSUS_PROTO_GIT_SHA = "5cec5ea58c3efa81fa808f2bd38ce182da9ee731" # Jul 25, 2019 OPENCENSUS_PROTO_SHA256 = "faeb93f293ff715b0cb530d273901c0e2e99277b9ed1c0a0326bca9ec5774ad2" @@ -24,7 +24,7 @@ ZIPKINAPI_SHA256 = "688c4fe170821dd589f36ec45aaadc03a618a40283bc1f97da8fa11686fc REPOSITORY_LOCATIONS = dict( bazel_skylib = dict( sha256 = BAZEL_SKYLIB_SHA256, - urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/" + BAZEL_SKYLIB_RELEASE + "/bazel-skylib." + BAZEL_SKYLIB_RELEASE + ".tar.gz"], + urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/" + BAZEL_SKYLIB_RELEASE + "/bazel_skylib-" + BAZEL_SKYLIB_RELEASE + ".tar.gz"], ), com_envoyproxy_protoc_gen_validate = dict( sha256 = PGV_SHA256, diff --git a/api/envoy/api/v2/route/BUILD b/api/envoy/api/v2/route/BUILD index 776207ac2f28..b570ccfdeee3 100644 --- a/api/envoy/api/v2/route/BUILD +++ b/api/envoy/api/v2/route/BUILD @@ -10,5 +10,6 @@ api_proto_package( "//envoy/type:pkg", "//envoy/type/matcher:pkg", "//envoy/type/tracing/v2:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/api/v2/route/route.proto b/api/envoy/api/v2/route/route.proto index 0e601d6558ca..d87f5e68fa40 100644 --- a/api/envoy/api/v2/route/route.proto +++ b/api/envoy/api/v2/route/route.proto @@ -18,6 +18,7 @@ import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; +import "udpa/annotations/migrate.proto"; import "validate/validate.proto"; // 
[#protodoc-title: HTTP route] @@ -749,9 +750,7 @@ message RouteAction { oneof host_rewrite_specifier { // Indicates that during forwarding, the host header will be swapped with // this value. - // - // [#next-major-version: host_rewrite_literal] - string host_rewrite = 6; + string host_rewrite = 6 [(udpa.annotations.field_migrate).rename = "host_rewrite_literal"]; // Indicates that during forwarding, the host header will be swapped with // the hostname of the upstream host chosen by the cluster manager. This @@ -768,9 +767,8 @@ message RouteAction { // // Pay attention to the potential security implications of using this option. Provided header // must come from trusted source. - // - // [#next-major-version: host_rewrite_header] - string auto_host_rewrite_header = 29; + string auto_host_rewrite_header = 29 + [(udpa.annotations.field_migrate).rename = "host_rewrite_header"]; } // Specifies the upstream timeout for the route. If not specified, the default is 15s. This diff --git a/api/envoy/api/v3alpha/route/route.proto b/api/envoy/api/v3alpha/route/route.proto index 6b00b7b4526e..2fd41d784fa1 100644 --- a/api/envoy/api/v3alpha/route/route.proto +++ b/api/envoy/api/v3alpha/route/route.proto @@ -734,9 +734,7 @@ message RouteAction { oneof host_rewrite_specifier { // Indicates that during forwarding, the host header will be swapped with // this value. - // - // [#next-major-version: host_rewrite_literal] - string host_rewrite = 6; + string host_rewrite_literal = 6; // Indicates that during forwarding, the host header will be swapped with // the hostname of the upstream host chosen by the cluster manager. This @@ -753,9 +751,7 @@ message RouteAction { // // Pay attention to the potential security implications of using this option. Provided header // must come from trusted source. - // - // [#next-major-version: host_rewrite_header] - string auto_host_rewrite_header = 29; + string host_rewrite_header = 29; } // Specifies the upstream timeout for the route. 
If not specified, the default is 15s. This diff --git a/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/BUILD b/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/BUILD index f6ae67b2e3a6..3068bcca824e 100644 --- a/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/BUILD +++ b/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/BUILD @@ -5,5 +5,8 @@ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( - deps = ["//envoy/config/common/dynamic_forward_proxy/v2alpha:pkg"], + deps = [ + "//envoy/config/common/dynamic_forward_proxy/v2alpha:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], ) diff --git a/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto b/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto index 4c4e1a5753cb..b515ffdac41e 100644 --- a/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto +++ b/api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto @@ -8,6 +8,7 @@ option java_multiple_files = true; import "envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto"; +import "udpa/annotations/migrate.proto"; import "validate/validate.proto"; // [#protodoc-title: Dynamic forward proxy] @@ -35,9 +36,7 @@ message PerRouteConfig { // :ref:`HCM host rewrite ` given that the // value set here would be used for DNS lookups whereas the value set in the HCM would be used // for host header forwarding which is not the desired outcome. - // - // [#next-major-version: host_rewrite_literal] - string host_rewrite = 1; + string host_rewrite = 1 [(udpa.annotations.field_migrate).rename = "host_rewrite_literal"]; // Indicates that before DNS lookup, the host header will be swapped with // the value of this header. 
If not set or empty, the original host header @@ -48,8 +47,7 @@ message PerRouteConfig { // :ref:`HCM host rewrite header ` // given that the value set here would be used for DNS lookups whereas the value set in the HCM // would be used for host header forwarding which is not the desired outcome. - // - // [#next-major-version: host_rewrite_header] - string auto_host_rewrite_header = 2; + string auto_host_rewrite_header = 2 + [(udpa.annotations.field_migrate).rename = "host_rewrite_header"]; } } diff --git a/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/dynamic_forward_proxy.proto b/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/dynamic_forward_proxy.proto index e28aabfb06a4..4df0b26d204c 100644 --- a/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/dynamic_forward_proxy.proto +++ b/api/envoy/config/filter/http/dynamic_forward_proxy/v3alpha/dynamic_forward_proxy.proto @@ -43,9 +43,7 @@ message PerRouteConfig { // :ref:`HCM host rewrite ` given // that the value set here would be used for DNS lookups whereas the value set in the HCM would // be used for host header forwarding which is not the desired outcome. - // - // [#next-major-version: host_rewrite_literal] - string host_rewrite = 1; + string host_rewrite_literal = 1; // Indicates that before DNS lookup, the host header will be swapped with // the value of this header. If not set or empty, the original host header @@ -57,8 +55,6 @@ message PerRouteConfig { // ` given that the // value set here would be used for DNS lookups whereas the value set in the HCM would be used // for host header forwarding which is not the desired outcome. 
- // - // [#next-major-version: host_rewrite_header] - string auto_host_rewrite_header = 2; + string host_rewrite_header = 2; } } diff --git a/api/envoy/service/ratelimit/v2/BUILD b/api/envoy/service/ratelimit/v2/BUILD index 206e27296172..3e2a630d507b 100644 --- a/api/envoy/service/ratelimit/v2/BUILD +++ b/api/envoy/service/ratelimit/v2/BUILD @@ -9,5 +9,6 @@ api_proto_package( deps = [ "//envoy/api/v2/core:pkg", "//envoy/api/v2/ratelimit:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/service/ratelimit/v2/rls.proto b/api/envoy/service/ratelimit/v2/rls.proto index d184657ab8b4..8c2bb0496f21 100644 --- a/api/envoy/service/ratelimit/v2/rls.proto +++ b/api/envoy/service/ratelimit/v2/rls.proto @@ -10,6 +10,7 @@ option java_generic_services = true; import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/ratelimit/ratelimit.proto"; +import "udpa/annotations/migrate.proto"; import "validate/validate.proto"; // [#protodoc-title: Rate Limit Service (RLS)] @@ -102,8 +103,8 @@ message RateLimitResponse { repeated DescriptorStatus statuses = 2; // A list of headers to add to the response - // [#next-major-version: rename to response_headers_to_add] - repeated api.v2.core.HeaderValue headers = 3; + repeated api.v2.core.HeaderValue headers = 3 + [(udpa.annotations.field_migrate).rename = "response_headers_to_add"]; // A list of headers to add to the request when forwarded repeated api.v2.core.HeaderValue request_headers_to_add = 4; diff --git a/api/envoy/service/ratelimit/v3alpha/rls.proto b/api/envoy/service/ratelimit/v3alpha/rls.proto index 5c1430350a24..79f06f58af9f 100644 --- a/api/envoy/service/ratelimit/v3alpha/rls.proto +++ b/api/envoy/service/ratelimit/v3alpha/rls.proto @@ -116,8 +116,7 @@ message RateLimitResponse { repeated DescriptorStatus statuses = 2; // A list of headers to add to the response - // [#next-major-version: rename to response_headers_to_add] - repeated api.v3alpha.core.HeaderValue headers = 3; + repeated 
api.v3alpha.core.HeaderValue response_headers_to_add = 3; // A list of headers to add to the request when forwarded repeated api.v3alpha.core.HeaderValue request_headers_to_add = 4; diff --git a/bazel/dependency_imports.bzl b/bazel/dependency_imports.bzl index f45e22dda8e3..051923e315a4 100644 --- a/bazel/dependency_imports.bzl +++ b/bazel/dependency_imports.bzl @@ -6,7 +6,7 @@ load("@build_bazel_rules_apple//apple:repositories.bzl", "apple_rules_dependenci load("@upb//bazel:repository_defs.bzl", upb_bazel_version_repository = "bazel_version_repository") # go version for rules_go -GO_VERSION = "1.13.3" +GO_VERSION = "1.13.5" def envoy_dependency_imports(go_version = GO_VERSION): rules_foreign_cc_dependencies() diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index b9eaa2099831..a9b76c9839bf 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -1,29 +1,29 @@ REPOSITORY_LOCATIONS = dict( bazel_compdb = dict( - sha256 = "801b35d996a097d223e028815fdba8667bf62bc5efb353486603d31fc2ba6ff9", - strip_prefix = "bazel-compilation-database-0.4.1", - urls = ["https://github.com/grailbio/bazel-compilation-database/archive/0.4.1.tar.gz"], + sha256 = "87e376a685eacfb27bcc0d0cdf5ded1d0b99d868390ac50f452ba6ed781caffe", + strip_prefix = "bazel-compilation-database-0.4.2", + urls = ["https://github.com/grailbio/bazel-compilation-database/archive/0.4.2.tar.gz"], ), bazel_gazelle = dict( - sha256 = "41bff2a0b32b02f20c227d234aa25ef3783998e5453f7eade929704dcff7cd4b", - urls = ["https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.19.0/bazel-gazelle-v0.19.0.tar.gz"], + sha256 = "86c6d481b3f7aedc1d60c1c211c6f76da282ae197c3b3160f54bd3a8f847896f", + urls = ["https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.19.1/bazel-gazelle-v0.19.1.tar.gz"], ), bazel_toolchains = dict( - sha256 = "83352b6e68fa797184071f35e3b67c7c8815efadcea81bb9cdb6bbbf2e07d389", - strip_prefix = 
"bazel-toolchains-1.1.3", + sha256 = "ca8aa49ceb47e9bee04dd67f0bec0b010032b37ebbe67147b535237e801d9a87", + strip_prefix = "bazel-toolchains-1.2.2", urls = [ - "https://github.com/bazelbuild/bazel-toolchains/releases/download/1.1.3/bazel-toolchains-1.1.3.tar.gz", - "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/1.1.3.tar.gz", + "https://github.com/bazelbuild/bazel-toolchains/releases/download/1.2.2/bazel-toolchains-1.2.2.tar.gz", + "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/1.2.2.tar.gz", ], ), build_bazel_rules_apple = dict( - urls = ["https://github.com/bazelbuild/rules_apple/archive/b869b0d3868d78a1d4ffd866ccb304fb68aa12c3.tar.gz"], - strip_prefix = "rules_apple-b869b0d3868d78a1d4ffd866ccb304fb68aa12c3", - sha256 = "bdc8e66e70b8a75da23b79f1f8c6207356df07d041d96d2189add7ee0780cf4e", + sha256 = "7a7afdd4869bb201c9352eed2daf37294d42b093579b70423490c1b4d4f6ce42", + urls = ["https://github.com/bazelbuild/rules_apple/releases/download/0.19.0/rules_apple.0.19.0.tar.gz"], ), envoy_build_tools = dict( sha256 = "a81ff3a12adedfc4641a926c9b167c53bea62784a81ac9ced7893436c709b60b", strip_prefix = "envoy-build-tools-07314d549e27e9a4033af6236888d2a9ee0ad443", + # 2019-11-22 urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/07314d549e27e9a4033af6236888d2a9ee0ad443.tar.gz"], ), boringssl = dict( @@ -234,8 +234,8 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/grpc-ecosystem/grpc-httpjson-transcoding/archive/2feabd5d64436e670084091a937855972ee35161.tar.gz"], ), io_bazel_rules_go = dict( - sha256 = "842ec0e6b4fbfdd3de6150b61af92901eeb73681fd4d185746644c338f51d4c0", - urls = ["https://github.com/bazelbuild/rules_go/releases/download/v0.20.1/rules_go-v0.20.1.tar.gz"], + sha256 = "e88471aea3a3a4f19ec1310a55ba94772d087e9ce46e41ae38ecebe17935de7b", + urls = 
["https://github.com/bazelbuild/rules_go/releases/download/v0.20.3/rules_go-v0.20.3.tar.gz"], ), rules_foreign_cc = dict( sha256 = "3184c244b32e65637a74213fc448964b687390eeeca42a36286f874c046bba15", @@ -267,9 +267,9 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/census-instrumentation/opencensus-cpp/archive/13b1a2f29f541b6b2c4cb8bc3f6fbf3589d44227.tar.gz"], ), com_github_curl = dict( - sha256 = "d0393da38ac74ffac67313072d7fe75b1fa1010eb5987f63f349b024a36b7ffb", - strip_prefix = "curl-7.66.0", - urls = ["https://github.com/curl/curl/releases/download/curl-7_66_0/curl-7.66.0.tar.gz"], + sha256 = "52af3361cf806330b88b4fe6f483b6844209d47ae196ac46da4de59bb361ab02", + strip_prefix = "curl-7.67.0", + urls = ["https://github.com/curl/curl/releases/download/curl-7_67_0/curl-7.67.0.tar.gz"], ), com_googlesource_chromium_v8 = dict( # This archive was created using https://storage.googleapis.com/envoyproxy-wee8/wee8-archive.sh @@ -289,9 +289,9 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/google/cel-cpp/archive/4767e5de36c5701fa8ea46d7de3765161ef98353.tar.gz"], ), com_googlesource_code_re2 = dict( - sha256 = "b0382aa7369f373a0148218f2df5a6afd6bfa884ce4da2dfb576b979989e615e", - strip_prefix = "re2-2019-09-01", - urls = ["https://github.com/google/re2/archive/2019-09-01.tar.gz"], + sha256 = "7268e1b4254d9ffa5ccf010fee954150dbb788fd9705234442e7d9f0ee5a42d3", + strip_prefix = "re2-2019-12-01", + urls = ["https://github.com/google/re2/archive/2019-12-01.tar.gz"], ), # Included to access FuzzedDataProvider.h. This is compiler agnostic but # provided as part of the compiler-rt source distribution. 
We can't use the @@ -307,8 +307,9 @@ REPOSITORY_LOCATIONS = dict( urls = ["https://github.com/fuzzitdev/fuzzit/releases/download/v2.4.76/fuzzit_Linux_x86_64.zip"], ), upb = dict( - sha256 = "61d0417abd60e65ed589c9deee7c124fe76a4106831f6ad39464e1525cef1454", - strip_prefix = "upb-9effcbcb27f0a665f9f345030188c0b291e32482", - urls = ["https://github.com/protocolbuffers/upb/archive/9effcbcb27f0a665f9f345030188c0b291e32482.tar.gz"], + sha256 = "e9f281c56ab1eb1f97a80ca8a83bb7ef73d230eabb8591f83876f4e7b85d9b47", + strip_prefix = "upb-8a3ae1ef3e3e3f26b45dec735c5776737fc7247f", + # 2019-11-19 + urls = ["https://github.com/protocolbuffers/upb/archive/8a3ae1ef3e3e3f26b45dec735c5776737fc7247f.tar.gz"], ), ) diff --git a/ci/verify_examples.sh b/ci/verify_examples.sh index 0902a4987872..4b9273ee052a 100755 --- a/ci/verify_examples.sh +++ b/ci/verify_examples.sh @@ -23,8 +23,8 @@ cd ../ # Test grpc bridge example # install go -curl -O https://storage.googleapis.com/golang/go1.13.3.linux-amd64.tar.gz -tar -xf go1.13.3.linux-amd64.tar.gz +curl -O https://storage.googleapis.com/golang/go1.13.5.linux-amd64.tar.gz +tar -xf go1.13.5.linux-amd64.tar.gz sudo mv go /usr/local export PATH=$PATH:/usr/local/go/bin export GOPATH=$HOME/go diff --git a/docs/root/_static/multilevel_deployment.svg b/docs/root/_static/multilevel_deployment.svg new file mode 100644 index 000000000000..ad2b3e12e806 --- /dev/null +++ b/docs/root/_static/multilevel_deployment.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/root/configuration/best_practices/best_practices.rst b/docs/root/configuration/best_practices/best_practices.rst index 259e6aefbd8e..51e423b6f303 100644 --- a/docs/root/configuration/best_practices/best_practices.rst +++ b/docs/root/configuration/best_practices/best_practices.rst @@ -5,3 +5,5 @@ Configuration best practices :maxdepth: 2 edge + level_two + diff --git a/docs/root/configuration/best_practices/level_two.rst 
b/docs/root/configuration/best_practices/level_two.rst new file mode 100644 index 000000000000..6bdf1c85e8bc --- /dev/null +++ b/docs/root/configuration/best_practices/level_two.rst @@ -0,0 +1,37 @@ +.. _best_practices_level2: + +Configuring Envoy as a level two proxy +====================================== + +Envoy is a production-ready proxy, however, the default settings that are tailored for the +edge use case may need to be adjusted when using Envoy in a multi-level deployment as a +"level two" HTTP/2 proxy. + +.. image:: /_static/multilevel_deployment.svg + +**In summary, if you run level two Envoy version 1.11.1 or greater which terminates +HTTP/2, we strongly advise you to change the HTTP/2 configuration of your level +two Envoy, by setting its downstream +:ref:`validation of HTTP/2 messaging option ` +to true.** + +If there is an invalid HTTP/2 request and this option is not set, the Envoy in +question will reset the entire connection. This behavior was changed as part of +the 1.11.1 security release, to increase the security of Edge Envoys. Unfortunately, +because there are no guarantees that edge proxies will enforce HTTP/1 or HTTP/2 +standards compliance as rigorously as Envoy’s HTTP/2 stack does, this can result +in a problem as follows. If one client sends a request that for example passes +level one proxy's validation checks, and it is forwarded over an upstream multiplexed +HTTP/2 connection (potentially shared with other clients) the strict enforcement on +the level two Envoy HTTP/2 will reset all the streams on that connection, causing +a service disruption to the clients sharing that L1-L2 connection. If a malicious +user has insight into what traffic will bypass level one checks, they could spray +“bad” traffic across the level one fleet, causing serious disruption to other users’ +traffic. 
+ +Please note that the +:ref:`validation of HTTP/2 messaging option ` +is planned to be deprecated and replaced with mandatory configuration in the HttpConnectionManager, to ensure +that what is now an easily overlooked option would need to be configured, ideally +appropriately for the given Envoy deployment. Please refer to the +https://github.com/envoyproxy/envoy/issues/9285 for more information. diff --git a/docs/root/configuration/http/http_conn_man/traffic_splitting.rst b/docs/root/configuration/http/http_conn_man/traffic_splitting.rst index 6e3943ff3700..bfbe0c191986 100644 --- a/docs/root/configuration/http/http_conn_man/traffic_splitting.rst +++ b/docs/root/configuration/http/http_conn_man/traffic_splitting.rst @@ -109,6 +109,7 @@ to each upstream cluster. - match: { prefix: / } route: weighted_clusters: + runtime_key_prefix: routing.traffic_split.helloworld clusters: - name: helloworld_v1 weight: 33 diff --git a/docs/root/faq/configuration/level_two.rst b/docs/root/faq/configuration/level_two.rst new file mode 100644 index 000000000000..72c46eecd378 --- /dev/null +++ b/docs/root/faq/configuration/level_two.rst @@ -0,0 +1,7 @@ +.. _faq_level2: + +How do I configure Envoy as a level two proxy? +============================================== + +Refer to :ref:`configuring Envoy as a level two proxy ` +for an example of the level 2 proxy configuration. 
diff --git a/docs/root/faq/overview.rst b/docs/root/faq/overview.rst index b30be9823d97..663a953435b8 100644 --- a/docs/root/faq/overview.rst +++ b/docs/root/faq/overview.rst @@ -27,6 +27,7 @@ Configuration :maxdepth: 2 configuration/edge + configuration/level_two configuration/sni configuration/zone_aware_routing configuration/zipkin_tracing diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst index e4ef72578198..e1bc688af4db 100644 --- a/docs/root/intro/version_history.rst +++ b/docs/root/intro/version_history.rst @@ -37,6 +37,18 @@ Version history * tracing: added upstream_address tag. * udp: added initial support for :ref:`UDP proxy ` +1.12.2 (December 10, 2019) +========================== +* http: fixed CVE-2019-18801 by allocating sufficient memory for request headers. +* http: fixed CVE-2019-18802 by implementing stricter validation of HTTP/1 headers. +* http: trim LWS at the end of header keys, for correct HTTP/1.1 header parsing. +* http: added strict authority checking. This can be reversed temporarily by setting the runtime feature `envoy.reloadable_features.strict_authority_validation` to false. +* route config: fixed CVE-2019-18838 by checking for presence of host/path headers. + +1.12.1 (November 8, 2019) +========================= +* listener: fixed CVE-2019-18836 by clearing accept filters before connection creation. + 1.12.0 (October 31, 2019) ========================= * access log: added a new flag for :ref:`downstream protocol error `. diff --git a/include/envoy/stream_info/stream_info.h b/include/envoy/stream_info/stream_info.h index 1b1ca9d55e2c..ccf5778be575 100644 --- a/include/envoy/stream_info/stream_info.h +++ b/include/envoy/stream_info/stream_info.h @@ -137,6 +137,8 @@ struct ResponseCodeDetailValues { // indicates that original "success" headers may have been sent downstream // despite the subsequent failure. 
const std::string LateUpstreamReset = "upstream_reset_after_response_started"; + // The request was rejected due to invalid characters in the HOST/:authority header. + const std::string InvalidAuthority = "invalid_authority"; }; using ResponseCodeDetails = ConstSingleton; diff --git a/source/common/chromium_url/url_canon_internal.h b/source/common/chromium_url/url_canon_internal.h index bffff5c12f4a..8c405b49814a 100644 --- a/source/common/chromium_url/url_canon_internal.h +++ b/source/common/chromium_url/url_canon_internal.h @@ -112,17 +112,6 @@ inline void AppendEscapedChar(UINCHAR ch, CanonOutputT* output) { // UTF-8 functions ------------------------------------------------------------ -// Reads one character in UTF-8 starting at |*begin| in |str| and places -// the decoded value into |*code_point|. If the character is valid, we will -// return true. If invalid, we'll return false and put the -// kUnicodeReplacementCharacter into |*code_point|. -// -// |*begin| will be updated to point to the last character consumed so it -// can be incremented in a loop and will be ready for the next character. -// (for a single-byte ASCII character, it will not be changed). -COMPONENT_EXPORT(URL) -bool ReadUTFChar(const char* str, int* begin, int length, unsigned* code_point_out); - // Generic To-UTF-8 converter. This will call the given append method for each // character that should be appended, with the given output method. Wrappers // are provided below for escaped and non-escaped versions of this. @@ -176,37 +165,6 @@ inline void AppendUTF8EscapedValue(unsigned char_value, CanonOutput* output) { DoAppendUTF8(char_value, output); } -// Escaping functions --------------------------------------------------------- - -// Writes the given character to the output as UTF-8, escaped. Call this -// function only when the input is wide. Returns true on success. 
Failure -// means there was some problem with the encoding, we'll still try to -// update the |*begin| pointer and add a placeholder character to the -// output so processing can continue. -// -// We will append the character starting at ch[begin] with the buffer ch -// being |length|. |*begin| will be updated to point to the last character -// consumed (we may consume more than one for UTF-16) so that if called in -// a loop, incrementing the pointer will move to the next character. -// -// Every single output character will be escaped. This means that if you -// give it an ASCII character as input, it will be escaped. Some code uses -// this when it knows that a character is invalid according to its rules -// for validity. If you don't want escaping for ASCII characters, you will -// have to filter them out prior to calling this function. -// -// Assumes that ch[begin] is within range in the array, but does not assume -// that any following characters are. -inline bool AppendUTF8EscapedChar(const char* str, int* begin, int length, CanonOutput* output) { - // ReadUTF8Char will handle invalid characters for us and give us the - // kUnicodeReplacementCharacter, so we don't have to do special checking - // after failure, just pass through the failure to the caller. - unsigned ch; - bool success = ReadUTFChar(str, begin, length, &ch); - AppendUTF8EscapedValue(ch, output); - return success; -} - // Given a '%' character at |*begin| in the string |spec|, this will decode // the escaped value and put it into |*unescaped_value| on success (returns // true). 
On failure, this will return false, and will not write into diff --git a/source/common/chromium_url/url_canon_path.cc b/source/common/chromium_url/url_canon_path.cc index f8c803a9c5f5..22587c0ab8a1 100644 --- a/source/common/chromium_url/url_canon_path.cc +++ b/source/common/chromium_url/url_canon_path.cc @@ -274,15 +274,11 @@ bool DoPartialPath(const CHAR* spec, const Component& path, int path_begin_in_ou bool success = true; for (int i = path.begin; i < end; i++) { UCHAR uch = static_cast(spec[i]); - if (sizeof(CHAR) > 1 && uch >= 0x80) { - // We only need to test wide input for having non-ASCII characters. For - // narrow input, we'll always just use the lookup table. We don't try to - // do anything tricky with decoding/validating UTF-8. This function will - // read one or two UTF-16 characters and append the output as UTF-8. This - // call will be removed in 8-bit mode. - success &= AppendUTF8EscapedChar(spec, &i, end, output); - } else { - // Normal ASCII character or 8-bit input, use the lookup table. + // Chromium UTF8 logic is unneeded, as the missing templated result + // refers only to char const* (single-byte) characters at this time. + // This only trips up MSVC, since linux gcc seems to optimize it away. + // Indention is to avoid gratuitous diffs to origin source + { unsigned char out_ch = static_cast(uch); unsigned char flags = kPathCharLookup[out_ch]; if (flags & SPECIAL) { diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 0feaa12d4e21..3b72848c6eb5 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -766,6 +766,15 @@ void ConnectionManagerImpl::ActiveStream::decodeHeaders(HeaderMapPtr&& headers, } } + // Make sure the host is valid. 
+ if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.strict_authority_validation") && + !HeaderUtility::authorityIsValid(request_headers_->Host()->value().getStringView())) { + sendLocalReply(Grpc::Common::hasGrpcContentType(*request_headers_), Code::BadRequest, "", + nullptr, is_head_request_, absl::nullopt, + StreamInfo::ResponseCodeDetails::get().InvalidAuthority); + return; + } + // Currently we only support relative paths at the application layer. We expect the codec to have // broken the path into pieces if applicable. NOTE: Currently the HTTP/1.1 codec only does this // when the allow_absolute_url flag is enabled on the HCM. diff --git a/source/common/http/header_utility.cc b/source/common/http/header_utility.cc index 71b390121fbb..7bb74e0ea932 100644 --- a/source/common/http/header_utility.cc +++ b/source/common/http/header_utility.cc @@ -135,6 +135,11 @@ bool HeaderUtility::headerIsValid(const absl::string_view header_value) { header_value.size()) != 0); } +bool HeaderUtility::authorityIsValid(const absl::string_view header_value) { + return (nghttp2_check_authority(reinterpret_cast(header_value.data()), + header_value.size()) != 0); +} + void HeaderUtility::addHeaders(HeaderMap& headers, const HeaderMap& headers_to_add) { headers_to_add.iterate( [](const HeaderEntry& header, void* context) -> HeaderMap::Iterate { diff --git a/source/common/http/header_utility.h b/source/common/http/header_utility.h index 51c5dc659857..aa2a92e3ece3 100644 --- a/source/common/http/header_utility.h +++ b/source/common/http/header_utility.h @@ -96,6 +96,12 @@ class HeaderUtility { */ static bool headerIsValid(const absl::string_view header_value); + /** + * Validates that the characters in the authority are valid. + * @return bool true if the header values are valid, false otherwise. 
+ */ + static bool authorityIsValid(const absl::string_view authority_value); + /** * Add headers from one HeaderMap to another * @param headers target where headers will be added diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index 8f2d33d29d37..df25b453cbc5 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -305,7 +305,7 @@ void RequestStreamEncoderImpl::encodeHeaders(const HeaderMap& headers, bool end_ head_request_ = true; } connection_.onEncodeHeaders(headers); - connection_.reserveBuffer(std::max(4096U, path->value().size() + 4096)); + connection_.reserveBuffer(path->value().size() + method->value().size() + 4096); connection_.copyToBuffer(method->value().getStringView().data(), method->value().size()); connection_.addCharToBuffer(' '); connection_.copyToBuffer(path->value().getStringView().data(), path->value().size()); @@ -469,7 +469,9 @@ void ConnectionImpl::onHeaderValue(const char* data, size_t length) { return; } - const absl::string_view header_value = absl::string_view(data, length); + // Work around a bug in http_parser where trailing whitespace is not trimmed + // as the spec requires: https://tools.ietf.org/html/rfc7230#section-3.2.4 + const absl::string_view header_value = StringUtil::trim(absl::string_view(data, length)); if (strict_header_validation_) { if (!Http::HeaderUtility::headerIsValid(header_value)) { @@ -487,7 +489,7 @@ void ConnectionImpl::onHeaderValue(const char* data, size_t length) { } header_parsing_state_ = HeaderParsingState::Value; - current_header_value_.append(data, length); + current_header_value_.append(header_value.data(), header_value.length()); const uint32_t total = current_header_field_.size() + current_header_value_.size() + current_header_map_->byteSize(); diff --git a/source/common/network/dns_impl.cc b/source/common/network/dns_impl.cc index e40416dcda13..c134872b71e7 100644 --- a/source/common/network/dns_impl.cc +++ 
b/source/common/network/dns_impl.cc @@ -25,8 +25,7 @@ DnsResolverImpl::DnsResolverImpl( const bool use_tcp_for_dns_lookups) : dispatcher_(dispatcher), timer_(dispatcher.createTimer([this] { onEventCallback(ARES_SOCKET_BAD, 0); })) { - ares_options options; - memset(&options, 0, sizeof(options)); + ares_options options{}; int optmask = 0; if (use_tcp_for_dns_lookups) { diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index 3e83cfe9dba8..6b6bf0fe7294 100644 --- a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -388,6 +388,12 @@ bool RouteEntryImplBase::matchRoute(const Http::HeaderMap& headers, uint64_t random_value) const { bool matches = true; + // TODO(mattklein123): Currently all match types require a path header. When we support CONNECT + // we will need to figure out how to safely relax this. + if (headers.Path() == nullptr) { + return false; + } + matches &= evaluateRuntimeMatch(random_value); if (!matches) { // No need to waste further cycles calculating a route match. @@ -1070,6 +1076,11 @@ const VirtualHostImpl* RouteMatcher::findVirtualHost(const Http::HeaderMap& head return default_virtual_host_.get(); } + // There may be no authority in early reply paths in the HTTP connection manager. 
+ if (headers.Host() == nullptr) { + return nullptr; + } + // TODO (@rshriram) Match Origin header in WebSocket // request with VHost, using wildcard match const std::string host = diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index d8bb32412732..483f4682e3cc 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -29,6 +29,8 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.buffer_filter_populate_content_length", "envoy.reloadable_features.outlier_detection_support_for_grpc_status", "envoy.reloadable_features.connection_header_sanitization", + "envoy.reloadable_features.strict_header_validation", + "envoy.reloadable_features.strict_authority_validation", }; // This is a list of configuration fields which are disallowed by default in Envoy diff --git a/source/common/runtime/runtime_impl.cc b/source/common/runtime/runtime_impl.cc index 9da583bdb8db..6ce1638ca4fb 100644 --- a/source/common/runtime/runtime_impl.cc +++ b/source/common/runtime/runtime_impl.cc @@ -24,9 +24,28 @@ namespace Envoy { namespace Runtime { +namespace { + +// Before ASSERTS were added to ensure runtime features were restricted to +// booleans, several integer features were added. isLegacyFeatures exempts +// existing integer features from the checks until they can be cleaned up. 
+// This includes +// envoy.reloadable_features.max_[request|response]_headers_count from +// include/envoy/http/codec.h as well as the http2_protocol_options overrides in +// source/common/http/http2/codec_impl.cc +bool isLegacyFeature(absl::string_view feature) { + return absl::StartsWith(feature, "envoy.reloadable_features.http2_protocol_options.") || + absl::StartsWith(feature, "envoy.reloadable_features.max_re"); +} + +bool isRuntimeFeature(absl::string_view feature) { + return absl::StartsWith(feature, "envoy.reloadable_features."); +} + +} // namespace bool runtimeFeatureEnabled(absl::string_view feature) { - ASSERT(absl::StartsWith(feature, "envoy.reloadable_features")); + ASSERT(isRuntimeFeature(feature)); if (Runtime::LoaderSingleton::getExisting()) { return Runtime::LoaderSingleton::getExisting()->threadsafeSnapshot()->runtimeFeatureEnabled( feature); @@ -220,6 +239,7 @@ bool SnapshotImpl::featureEnabled(const std::string& key, uint64_t default_value } const std::string& SnapshotImpl::get(const std::string& key) const { + ASSERT(!isRuntimeFeature(key)); // Make sure runtime guarding is only used for getBoolean auto entry = values_.find(key); if (entry == values_.end()) { return EMPTY_STRING; @@ -261,6 +281,7 @@ bool SnapshotImpl::featureEnabled(const std::string& key, } uint64_t SnapshotImpl::getInteger(const std::string& key, uint64_t default_value) const { + ASSERT(isLegacyFeature(key) || !isRuntimeFeature(key)); auto entry = values_.find(key); if (entry == values_.end() || !entry->second.uint_value_) { return default_value; @@ -270,6 +291,7 @@ uint64_t SnapshotImpl::getInteger(const std::string& key, uint64_t default_value } double SnapshotImpl::getDouble(const std::string& key, double default_value) const { + ASSERT(!isRuntimeFeature(key)); // Make sure runtime guarding is only used for getBoolean auto entry = values_.find(key); if (entry == values_.end() || !entry->second.double_value_) { return default_value; diff --git 
a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index e8688c9edcf0..398e47edc97f 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -480,7 +480,7 @@ bool ClusterManagerImpl::scheduleUpdate(const Cluster& cluster, uint32_t priorit } // Ensure there's a timer set to deliver these updates. - if (!updates->timer_enabled_) { + if (!updates->timer_->enabled()) { updates->enableTimer(timeout); } @@ -500,7 +500,6 @@ void ClusterManagerImpl::applyUpdates(const Cluster& cluster, uint32_t priority, postThreadLocalClusterUpdate(cluster, priority, hosts_added, hosts_removed); cm_stats_.cluster_updated_via_merge_.inc(); - updates.timer_enabled_ = false; updates.last_updated_ = time_source_.monotonicTime(); } diff --git a/source/common/upstream/cluster_manager_impl.h b/source/common/upstream/cluster_manager_impl.h index e4d8830925a2..83f463afd777 100644 --- a/source/common/upstream/cluster_manager_impl.h +++ b/source/common/upstream/cluster_manager_impl.h @@ -407,24 +407,22 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggableenabled()); timer_->enableTimer(std::chrono::milliseconds(timeout)); - timer_enabled_ = true; } } bool disableTimer() { - const bool was_enabled = timer_enabled_; - if (timer_ != nullptr) { - timer_->disableTimer(); - timer_enabled_ = false; + if (timer_ == nullptr) { + return false; } + + const bool was_enabled = timer_->enabled(); + timer_->disableTimer(); return was_enabled; } Event::TimerPtr timer_; - // TODO(rgs1): this should be part of Event::Timer's interface. 
- bool timer_enabled_{}; // This is default constructed to the clock's epoch: // https://en.cppreference.com/w/cpp/chrono/time_point/time_point // diff --git a/test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5726642969772032 b/test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5726642969772032 new file mode 100644 index 000000000000..f8d96b4c26af --- /dev/null +++ b/test/common/http/codec_impl_corpus/clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5726642969772032 @@ -0,0 +1,12 @@ +actions { new_stream { request_headers { headers { key: ":method" value: " _he ke new_st e new_st kr ke st e new_st t_he ke new_st e new_st kr ke n key:e ke new_st e new_st kr ke n _he ke new_st e new_st kr ke st e new_st t_he ke new_st e new_st kr ke n key:e ke new_st e new_st kr ke key:e ke new_st e new_st kr ke n _he ke new_st e new_st kr ke st e new_s ke n key:e ke new_st e new_st kr ke n _he ke new_st e new_st kr ke st e new_st t_he ke new_st e new_st kr ke n key:e ke new_st e new_st kr ke key:e ke new_st e new_st kr ke n _he ke new_st e new_st kr ke st e new_st t_he ke new_st e new_st kr n key:e ke new_st e new_st kr ke key:e ke new_st e new_st kr ke n _h new_st t_he ke new_st e new_st kr n key:e ke new_st e new_st kr ke key:e ke new_st e ke key:e ke new_st e new_st kr ke n _he ke new_st e new_st kr ke st e new_st t_he ke new_st e new_st kr n key:e ke new_st e new_st kr ke key:e ke new_st e new_st kr ke n _h new_st t_he ke new_st e new_st kr n key:e ke new_st e new_st kr ke key:e ke new_st e new_st kr ke n _he ke new_st e new_st kr ke st e new_st t_he ke new_st e new_st kr ke n key:e ke he ke new_st e new_st kr ke st e new_st t_he ket_he ke new_st e new_st kr e new_st t_he ke new_st e new_st kr ke n key:e ke newstrey:_]E]u___ }\n}," + } + headers { + key: ":method" + value: "GETactions {\n muta{\n ketruest_he key: ctions {\n ers {\n headers {\n key: ctions {\n new_streamTnrtasfTkey: ctioew: new_stream 
{asfer-e key: ctioew: r-e key: ctionsest_headers new_stream {asfer-e key: ctioew: r-e key: ctionsest_headers {\n he: r-e key: ctionsest_heade headers {u key: cti new_streew_stream {asfer-e key: c{u amrtasfer-headers {headers {\n headers {u new_stream {asfer-e key: ctioew: r-e key: ctionsest_headers {\n he: r-e key: ctionsest_heade headers {u key: cti new_streew_stream {asfer-e key: c{u amTkey: ctioew: new_stream {asfer-e key: ctioew: r-e key: ctionsest_headers {\n headers headers {u new_stream {asfer-e key: ctioew: r-e key: ctionsest_headers {\n he: r-e key: ctionsest_heade headers {u key: cti headers {u new_stream {asfer-e key: ctioew: r-e oew: r-e key: ctionsest_headers {\n he: r-e key: ctionsest_heade headers {u key: cti new_streew_stream {asfer-e key: c{u amTkey: ctioew: new_stream {asfer-e key: ctioew: r-e key: ctionsest_headers new_stream {asfer-e key: ctioew: r-ede headers {u key: cti new_streesfer-headest_heade headers {u key: cti headers {u new_stream {asfer-e key: ctioew: r-e oew: r-e key: ctionsest_headers {\n he: r-e key: ctionsest_heade headers {u key: cti new_streew_stream {asfer-e key: c{u amTkey: ctioew: new_stream {asfer-e key: ctioew: r-e key: ctionsest_headers new_stream {asfer-e key: ctioew: r-ede headers {u key: cti new_streesfer-headers {@ headers {u new_stream {asfer-e key: ctioew: \"Tnrtasfer-e key: ction: cti new_stream {reew_stream {asfer-e key: c{u amTkey: ctioew: new_stream {asfer-e key: ctioew: r-e key: ctionsest_headers new_stream {asfer-e key: ctioew: r-ede headers {u key: cti new_streesfer-headers {@ headers {u new_stream {asfer-e key: ctioew: \"Tnrtasfer-e key: ction: cti new_stream {\n new_streame: s {\n new_streamTnrtasfe " + } + headers { + key: ":path" + } + } + } +} diff --git a/test/common/http/header_utility_test.cc b/test/common/http/header_utility_test.cc index 9b03bc64825e..a54dd4c10ffc 100644 --- a/test/common/http/header_utility_test.cc +++ b/test/common/http/header_utility_test.cc @@ -464,6 +464,13 @@ 
TEST(HeaderIsValidTest, ValidHeaderValuesAreAccepted) { EXPECT_TRUE(HeaderUtility::headerIsValid("Some Other Value")); } +TEST(HeaderIsValidTest, AuthIsValid) { + EXPECT_TRUE(HeaderUtility::authorityIsValid("strangebutlegal$-%&'")); + EXPECT_FALSE(HeaderUtility::authorityIsValid("illegal{}")); + // Full checks are done by Http2CodecImplTest.CheckAuthority, cross checking + // against nghttp2 compliance. +} + TEST(HeaderAddTest, HeaderAdd) { TestHeaderMapImpl headers{{"myheader1", "123value"}}; TestHeaderMapImpl headers_to_add{{"myheader2", "456value"}}; diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index f6fde28db2a2..cc77c9370984 100644 --- a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -61,6 +61,25 @@ class Http1ServerConnectionImplTest : public testing::Test { void testRequestHeadersExceedLimit(std::string header_string); void testRequestHeadersAccepted(std::string header_string); + // Send the request, and validate the received request headers. + // Then send a response just to clean up. 
+ void sendAndValidateRequestAndSendResponse(absl::string_view raw_request, + const TestHeaderMapImpl& expected_request_headers) { + NiceMock decoder; + Http::StreamEncoder* response_encoder = nullptr; + EXPECT_CALL(callbacks_, newStream(_, _)) + .Times(1) + .WillOnce(Invoke([&](Http::StreamEncoder& encoder, bool) -> Http::StreamDecoder& { + response_encoder = &encoder; + return decoder; + })); + EXPECT_CALL(decoder, decodeHeaders_(HeaderMapEqual(&expected_request_headers), true)).Times(1); + Buffer::OwnedImpl buffer(raw_request); + codec_->dispatch(buffer); + EXPECT_EQ(0U, buffer.length()); + response_encoder->encodeHeaders(TestHeaderMapImpl{{":status", "200"}}, true); + } + protected: uint32_t max_request_headers_kb_{Http::DEFAULT_MAX_REQUEST_HEADERS_KB}; uint32_t max_request_headers_count_{Http::DEFAULT_MAX_HEADERS_COUNT}; @@ -167,6 +186,23 @@ TEST_F(Http1ServerConnectionImplTest, EmptyHeader) { EXPECT_EQ(0U, buffer.length()); } +TEST_F(Http1ServerConnectionImplTest, HostWithLWS) { + initialize(); + + TestHeaderMapImpl expected_headers{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}; + + // Regression test spaces before and after the host header value. + sendAndValidateRequestAndSendResponse("GET / HTTP/1.1\r\nHost: host \r\n\r\n", expected_headers); + + // Regression test tabs before and after the host header value. + sendAndValidateRequestAndSendResponse("GET / HTTP/1.1\r\nHost: host \r\n\r\n", + expected_headers); + + // Regression test mixed spaces and tabs before and after the host header value. 
+ sendAndValidateRequestAndSendResponse( + "GET / HTTP/1.1\r\nHost: host \r\n\r\n", expected_headers); +} + TEST_F(Http1ServerConnectionImplTest, Http10) { initialize(); @@ -358,6 +394,22 @@ TEST_F(Http1ServerConnectionImplTest, BadRequestNoStream) { EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", output); } +// This behavior was observed during CVE-2019-18801 and helped to limit the +// scope of affected Envoy configurations. +TEST_F(Http1ServerConnectionImplTest, RejectInvalidMethod) { + initialize(); + + Http::MockStreamDecoder decoder; + EXPECT_CALL(callbacks_, newStream(_, _)).WillOnce(ReturnRef(decoder)); + + std::string output; + ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output)); + + Buffer::OwnedImpl buffer("BAD / HTTP/1.1\r\nHost: foo\r\n"); + EXPECT_THROW(codec_->dispatch(buffer), CodecProtocolException); + EXPECT_EQ("HTTP/1.1 400 Bad Request\r\ncontent-length: 0\r\nconnection: close\r\n\r\n", output); +} + TEST_F(Http1ServerConnectionImplTest, BadRequestStartedStream) { initialize(); @@ -559,6 +611,33 @@ TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponse) { EXPECT_EQ("HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\n", output); } +// As with Http1ClientConnectionImplTest.LargeHeaderRequestEncode but validate +// the response encoder instead of request encoder. 
+TEST_F(Http1ServerConnectionImplTest, LargeHeaderResponseEncode) { + initialize(); + + NiceMock decoder; + Http::StreamEncoder* response_encoder = nullptr; + EXPECT_CALL(callbacks_, newStream(_, _)) + .WillOnce(Invoke([&](Http::StreamEncoder& encoder, bool) -> Http::StreamDecoder& { + response_encoder = &encoder; + return decoder; + })); + + Buffer::OwnedImpl buffer("GET / HTTP/1.1\r\n\r\n"); + codec_->dispatch(buffer); + EXPECT_EQ(0U, buffer.length()); + + std::string output; + ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output)); + + const std::string long_header_value = std::string(79 * 1024, 'a'); + TestHeaderMapImpl headers{{":status", "200"}, {"foo", long_header_value}}; + response_encoder->encodeHeaders(headers, true); + EXPECT_EQ("HTTP/1.1 200 OK\r\nfoo: " + long_header_value + "\r\ncontent-length: 0\r\n\r\n", + output); +} + TEST_F(Http1ServerConnectionImplTest, HeaderOnlyResponseTrainProperHeaders) { codec_settings_.header_key_format_ = Http1Settings::HeaderKeyFormat::ProperCase; initialize(); @@ -1428,6 +1507,56 @@ TEST_F(Http1ClientConnectionImplTest, LargeResponseHeadersAccepted) { codec_->dispatch(buffer); } +// Regression test for CVE-2019-18801. Large method headers should not trigger +// ASSERTs or ASAN, which they previously did. +TEST_F(Http1ClientConnectionImplTest, LargeMethodRequestEncode) { + initialize(); + + NiceMock response_decoder; + const std::string long_method = std::string(79 * 1024, 'a'); + Http::StreamEncoder& request_encoder = codec_->newStream(response_decoder); + TestHeaderMapImpl headers{{":method", long_method}, {":path", "/"}, {":authority", "host"}}; + std::string output; + ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output)); + request_encoder.encodeHeaders(headers, true); + EXPECT_EQ(long_method + " / HTTP/1.1\r\nhost: host\r\ncontent-length: 0\r\n\r\n", output); +} + +// As with LargeMethodEncode, but for the path header. 
This was not an issue +// in CVE-2019-18801, but the related code does explicit size calculations on +// both path and method (these are the two distinguished headers). So, +// belt-and-braces. +TEST_F(Http1ClientConnectionImplTest, LargePathRequestEncode) { + initialize(); + + NiceMock response_decoder; + const std::string long_path = std::string(79 * 1024, '/'); + Http::StreamEncoder& request_encoder = codec_->newStream(response_decoder); + TestHeaderMapImpl headers{{":method", "GET"}, {":path", long_path}, {":authority", "host"}}; + std::string output; + ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output)); + request_encoder.encodeHeaders(headers, true); + EXPECT_EQ("GET " + long_path + " HTTP/1.1\r\nhost: host\r\ncontent-length: 0\r\n\r\n", output); +} + +// As with LargeMethodEncode, but for an arbitrary header. This was not an issue +// in CVE-2019-18801. +TEST_F(Http1ClientConnectionImplTest, LargeHeaderRequestEncode) { + initialize(); + + NiceMock response_decoder; + Http::StreamEncoder& request_encoder = codec_->newStream(response_decoder); + const std::string long_header_value = std::string(79 * 1024, 'a'); + TestHeaderMapImpl headers{ + {":method", "GET"}, {"foo", long_header_value}, {":path", "/"}, {":authority", "host"}}; + std::string output; + ON_CALL(connection_, write(_, _)).WillByDefault(AddBufferToString(&output)); + request_encoder.encodeHeaders(headers, true); + EXPECT_EQ("GET / HTTP/1.1\r\nhost: host\r\nfoo: " + long_header_value + + "\r\ncontent-length: 0\r\n\r\n", + output); +} + // Exception called when the number of response headers exceeds the default value of 100. 
TEST_F(Http1ClientConnectionImplTest, ManyResponseHeadersRejected) { initialize(); diff --git a/test/common/http/http2/BUILD b/test/common/http/http2/BUILD index 3a93f99146e4..ded96525c02b 100644 --- a/test/common/http/http2/BUILD +++ b/test/common/http/http2/BUILD @@ -19,6 +19,7 @@ envoy_cc_test( "//source/common/event:dispatcher_lib", "//source/common/http:exception_lib", "//source/common/http:header_map_lib", + "//source/common/http:header_utility_lib", "//source/common/http/http2:codec_lib", "//source/common/stats:stats_lib", "//test/common/http:common_lib", diff --git a/test/common/http/http2/codec_impl_test.cc b/test/common/http/http2/codec_impl_test.cc index c2784c8dcf19..be111b5a8a19 100644 --- a/test/common/http/http2/codec_impl_test.cc +++ b/test/common/http/http2/codec_impl_test.cc @@ -1090,6 +1090,25 @@ TEST_P(Http2CodecImplTest, LargeRequestHeadersAccepted) { request_encoder_->encodeHeaders(request_headers, false); } +// This is the HTTP/2 variant of the HTTP/1 regression test for CVE-2019-18801. +// Large method headers should not trigger ASSERTs or ASAN. The underlying issue +// in CVE-2019-18801 only affected the HTTP/1 encoder, but we include a test +// here for belt-and-braces. This also demonstrates that the HTTP/2 codec will +// accept arbitrary :method headers, unlike the HTTP/1 codec (see +// Http1ServerConnectionImplTest.RejectInvalidMethod for comparison). 
+TEST_P(Http2CodecImplTest, LargeMethodRequestEncode) { + max_request_headers_kb_ = 80; + initialize(); + + const std::string long_method = std::string(79 * 1024, 'a'); + TestHeaderMapImpl request_headers; + HttpTestUtility::addDefaultHeaders(request_headers); + request_headers.setReferenceKey(Headers::get().Method, long_method); + EXPECT_CALL(request_decoder_, decodeHeaders_(HeaderMapEqual(&request_headers), false)); + EXPECT_CALL(server_stream_callbacks_, onResetStream(_, _)).Times(0); + request_encoder_->encodeHeaders(request_headers, false); +} + // Tests stream reset when the number of request headers exceeds the default maximum of 100. TEST_P(Http2CodecImplTest, ManyRequestHeadersInvokeResetStream) { initialize(); diff --git a/test/common/network/dns_impl_test.cc b/test/common/network/dns_impl_test.cc index 9603e8d2748b..ab1f4e26a21a 100644 --- a/test/common/network/dns_impl_test.cc +++ b/test/common/network/dns_impl_test.cc @@ -880,8 +880,7 @@ TEST_P(DnsImplAresFlagsForTcpTest, TcpLookupsEnabled) { server_->addCName("root.cnam.domain", "result.cname.domain"); server_->addHosts("result.cname.domain", {"201.134.56.7"}, RecordType::A); std::list address_list; - struct ares_options opts; - memset(&opts, 0, sizeof(opts)); + ares_options opts{}; int optmask = 0; EXPECT_EQ(ARES_SUCCESS, ares_save_options(peer_->channel(), &opts, &optmask)); EXPECT_TRUE((opts.flags & ARES_FLAG_USEVC) == ARES_FLAG_USEVC); @@ -909,8 +908,7 @@ TEST_P(DnsImplAresFlagsForUdpTest, UdpLookupsEnabled) { server_->addCName("root.cnam.domain", "result.cname.domain"); server_->addHosts("result.cname.domain", {"201.134.56.7"}, RecordType::A); std::list address_list; - struct ares_options opts; - memset(&opts, 0, sizeof(opts)); + ares_options opts{}; int optmask = 0; EXPECT_EQ(ARES_SUCCESS, ares_save_options(peer_->channel(), &opts, &optmask)); EXPECT_FALSE((opts.flags & ARES_FLAG_USEVC) == ARES_FLAG_USEVC); diff --git a/test/common/router/config_impl_test.cc 
b/test/common/router/config_impl_test.cc index 3d5216e82459..0d12eba6b2b7 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -648,6 +648,17 @@ TEST_F(RouteMatcherTest, TestRoutes) { NiceMock stream_info; TestConfigImpl config(parseRouteConfigurationFromV2Yaml(yaml), factory_context_, true); + // No host header, no x-forwarded-proto and no path header testing. + EXPECT_EQ(nullptr, config.route(Http::TestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, 0)); + EXPECT_EQ( + nullptr, + config.route( + Http::TestHeaderMapImpl{{":authority", "foo"}, {":path", "/"}, {":method", "GET"}}, 0)); + EXPECT_EQ(nullptr, config.route(Http::TestHeaderMapImpl{{":authority", "foo"}, + {":method", "CONNECT"}, + {"x-forwarded-proto", "http"}}, + 0)); + // Base routing testing. EXPECT_EQ("instant-server", config.route(genHeaders("api.lyft.com", "/", "GET"), 0)->routeEntry()->clusterName()); diff --git a/test/common/router/route_fuzz_test.cc b/test/common/router/route_fuzz_test.cc index cf28aeb5f3a5..34df71b6b0a3 100644 --- a/test/common/router/route_fuzz_test.cc +++ b/test/common/router/route_fuzz_test.cc @@ -84,17 +84,6 @@ DEFINE_PROTO_FUZZER(const test::common::router::RouteTestCase& input) { ConfigImpl config(cleanRouteConfig(input.config()), factory_context, ProtobufMessage::getNullValidationVisitor(), true); Http::TestHeaderMapImpl headers = Fuzz::fromHeaders(input.headers()); - // It's a precondition of routing that {:authority, :path, x-forwarded-proto} headers exists, - // HCM enforces this. 
- if (headers.Host() == nullptr) { - headers.setHost("example.com"); - } - if (headers.Path() == nullptr) { - headers.setPath("/"); - } - if (headers.ForwardedProto() == nullptr) { - headers.setForwardedProto("http"); - } auto route = config.route(headers, stream_info, input.random_value()); if (route != nullptr && route->routeEntry() != nullptr) { route->routeEntry()->finalizeRequestHeaders(headers, stream_info, true); diff --git a/test/extensions/filters/http/lua/lua_integration_test.cc b/test/extensions/filters/http/lua/lua_integration_test.cc index 36f30b08702e..043d2d33dde5 100644 --- a/test/extensions/filters/http/lua/lua_integration_test.cc +++ b/test/extensions/filters/http/lua/lua_integration_test.cc @@ -21,7 +21,7 @@ class LuaIntegrationTest : public testing::TestWithParammutable_virtual_hosts(0) ->mutable_routes(0) ->mutable_match() ->set_prefix("/test/long/url"); + hcm.mutable_route_config()->mutable_virtual_hosts(0)->set_domains(0, domain); auto* new_route = hcm.mutable_route_config()->mutable_virtual_hosts(0)->add_routes(); new_route->mutable_match()->set_prefix("/alt/route"); new_route->mutable_route()->set_cluster("alt_cluster"); @@ -96,6 +97,32 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, LuaIntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); +// Regression test for pulling route info during early local replies using the Lua filter +// metadata() API. Covers both the upgrade required and no authority cases. 
+TEST_P(LuaIntegrationTest, CallMetadataDuringLocalReply) { + const std::string FILTER_AND_CODE = + R"EOF( +name: envoy.lua +typed_config: + "@type": type.googleapis.com/envoy.config.filter.http.lua.v2.Lua + inline_code: | + function envoy_on_response(response_handle) + local metadata = response_handle:metadata():get("foo.bar") + if metadata == nil then + end + end +)EOF"; + + initializeFilter(FILTER_AND_CODE, "foo"); + std::string response; + sendRawHttpAndWaitForResponse(lookupPort("http"), "GET / HTTP/1.0\r\n\r\n", &response, true); + EXPECT_TRUE(response.find("HTTP/1.1 426 Upgrade Required\r\n") == 0); + + response = ""; + sendRawHttpAndWaitForResponse(lookupPort("http"), "GET / HTTP/1.1\r\n\r\n", &response, true); + EXPECT_TRUE(response.find("HTTP/1.1 400 Bad Request\r\n") == 0); +} + // Basic request and response. TEST_P(LuaIntegrationTest, RequestAndResponse) { const std::string FILTER_AND_CODE = diff --git a/test/integration/fake_upstream.h b/test/integration/fake_upstream.h index 0405b820d1af..0c044bcb46cf 100644 --- a/test/integration/fake_upstream.h +++ b/test/integration/fake_upstream.h @@ -24,6 +24,7 @@ #include "common/common/thread.h" #include "common/grpc/codec.h" #include "common/grpc/common.h" +#include "common/http/exception.h" #include "common/network/connection_balancer_impl.h" #include "common/network/filter_impl.h" #include "common/network/listen_socket_impl.h" @@ -437,10 +438,24 @@ class FakeHttpConnection : public Http::ServerConnectionCallbacks, public FakeCo // Network::ReadFilter Network::FilterStatus onData(Buffer::Instance& data, bool) override { - parent_.codec_->dispatch(data); + try { + parent_.codec_->dispatch(data); + } catch (const Http::CodecProtocolException& e) { + ENVOY_LOG(debug, "FakeUpstream dispatch error: {}", e.what()); + // We don't do a full stream shutdown like HCM, but just shutdown the + // connection for now. 
+ read_filter_callbacks_->connection().close( + Network::ConnectionCloseType::FlushWriteAndDelay); + } return Network::FilterStatus::StopIteration; } + void + initializeReadFilterCallbacks(Network::ReadFilterCallbacks& read_filter_callbacks) override { + read_filter_callbacks_ = &read_filter_callbacks; + } + + Network::ReadFilterCallbacks* read_filter_callbacks_{}; FakeHttpConnection& parent_; }; diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index c65cbabdfd99..d9aadffa1f4a 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -446,13 +446,15 @@ TEST_P(IntegrationTest, TestInlineHeaders) { // Verify for HTTP/1.0 a keep-alive header results in no connection: close. // Also verify existing host headers are passed through for the HTTP/1.0 case. -TEST_P(IntegrationTest, Http10WithHostandKeepAlive) { +// This also regression tests proper handling of trailing whitespace after key +// values, specifically the host header. +TEST_P(IntegrationTest, Http10WithHostandKeepAliveAndLws) { autonomous_upstream_ = true; config_helper_.addConfigModifier(&setAllowHttp10WithDefaultHost); initialize(); std::string response; sendRawHttpAndWaitForResponse(lookupPort("http"), - "GET / HTTP/1.0\r\nHost: foo.com\r\nConnection:Keep-alive\r\n\r\n", + "GET / HTTP/1.0\r\nHost: foo.com \r\nConnection:Keep-alive\r\n\r\n", &response, true); EXPECT_THAT(response, HasSubstr("HTTP/1.0 200 OK\r\n")); EXPECT_THAT(response, Not(HasSubstr("connection: close"))); diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index e2bc8f8d3cd6..39e98f3e5ffb 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -1065,6 +1065,55 @@ TEST_P(DownstreamProtocolIntegrationTest, ManyTrailerHeaders) { EXPECT_EQ("200", response->headers().Status()->value().getStringView()); } +// Regression tests for CVE-2019-18801. 
We only validate the behavior of large +// :method request headers, since the case of other large headers is +// covered in the various testLargeRequest-based integration tests here. +// +// The table below describes the expected behaviors (in addition we should never +// see an ASSERT or ASAN failure trigger). +// +// Downstream Upstream Behavior expected +// ------------------------------------------ +// H1 H1 Envoy will reject (HTTP/1 codec behavior) +// H1 H2 Envoy will reject (HTTP/1 codec behavior) +// H2 H1 Envoy will forward but backend will reject (HTTP/1 +// codec behavior) +// H2 H2 Success +TEST_P(ProtocolIntegrationTest, LargeRequestMethod) { + const std::string long_method = std::string(48 * 1024, 'a'); + const Http::TestHeaderMapImpl request_headers{{":method", long_method}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}}; + + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + if (downstreamProtocol() == Http::CodecClient::Type::HTTP1) { + auto encoder_decoder = codec_client_->startRequest(request_headers); + request_encoder_ = &encoder_decoder.first; + auto response = std::move(encoder_decoder.second); + codec_client_->waitForDisconnect(); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("400", response->headers().Status()->value().getStringView()); + } else { + ASSERT(downstreamProtocol() == Http::CodecClient::Type::HTTP2); + if (upstreamProtocol() == FakeHttpConnection::Type::HTTP1) { + auto response = codec_client_->makeHeaderOnlyRequest(request_headers); + ASSERT_TRUE( + fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + response->waitForEndStream(); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("400", response->headers().Status()->value().getStringView()); + } else { + ASSERT(upstreamProtocol() == FakeHttpConnection::Type::HTTP2); + auto response = + sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0); + 
EXPECT_TRUE(response->complete()); + } + } +} + // Tests StopAllIterationAndBuffer. Verifies decode-headers-return-stop-all-filter calls decodeData // once after iteration is resumed. TEST_P(DownstreamProtocolIntegrationTest, testDecodeHeadersReturnsStopAll) { @@ -1418,6 +1467,32 @@ TEST_P(ProtocolIntegrationTest, ConnDurationTimeoutNoHttpRequest) { test_server_->waitForCounterGe("http.config_test.downstream_cx_max_duration_reached", 1); } +// Make sure that invalid authority headers get blocked at or before the HCM. +TEST_P(DownstreamProtocolIntegrationTest, InvalidAuth) { + initialize(); + fake_upstreams_[0]->set_allow_unexpected_disconnects(true); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + Http::TestHeaderMapImpl request_headers{{":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "ho|st|"}}; + + auto response = codec_client_->makeHeaderOnlyRequest(request_headers); + if (downstreamProtocol() == Http::CodecClient::Type::HTTP1) { + // For HTTP/1 this is handled by the HCM, which sends a full 400 response. + response->waitForEndStream(); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("400", response->headers().Status()->value().getStringView()); + } else { + // For HTTP/2 this is handled by nghttp2 which resets the connection without + // sending an HTTP response. + codec_client_->waitForDisconnect(); + ASSERT_FALSE(response->complete()); + } +} + // For tests which focus on downstream-to-Envoy behavior, and don't need to be // run with both HTTP/1 and HTTP/2 upstreams. INSTANTIATE_TEST_SUITE_P(Protocols, DownstreamProtocolIntegrationTest, diff --git a/tools/check_format.py b/tools/check_format.py index f555b74e2406..d06cb169a27d 100755 --- a/tools/check_format.py +++ b/tools/check_format.py @@ -878,6 +878,11 @@ def checkErrorMessages(error_messages): # error_messages. 
def ownedDirectories(error_messages): owned = [] + maintainers = [ + '@mattklein123', '@htuch', '@alyssawilk', '@zuercher', '@lizan', '@snowp', '@junr03', + '@dnoe', '@dio', '@jmarantz' + ] + try: with open('./CODEOWNERS') as f: for line in f: @@ -890,6 +895,11 @@ def ownedDirectories(error_messages): if len(owners) < 2: error_messages.append("Extensions require at least 2 owners in CODEOWNERS:\n" " {}".format(line)) + maintainer = len(set(owners).intersection(set(maintainers))) > 0 + if not maintainer: + error_messages.append("Extensions require at least one maintainer OWNER:\n" + " {}".format(line)) + return owned except IOError: return [] # for the check format tests. diff --git a/tools/check_spelling.sh b/tools/check_spelling.sh index 7fa31c5b314d..7597e80e7777 100755 --- a/tools/check_spelling.sh +++ b/tools/check_spelling.sh @@ -78,5 +78,5 @@ SPELLING_WHITELIST_WORDS_FILE="${ROOTDIR}/tools/spelling_whitelist_words.txt" WHITELIST_WORDS=$(echo -n $(cat "${SPELLING_WHITELIST_WORDS_FILE}" | \ grep -v "^#"|grep -v "^$") | tr ' ' ',') SKIP_FILES=$(echo $(cat "${SPELLING_SKIP_FILES}") | sed "s| | -e |g") -git ls-files | grep -v -e "${SKIP_FILES}" | xargs "${TMP_DIR}/misspell" -i \ +git ls-files | grep -v -e ${SKIP_FILES} | xargs "${TMP_DIR}/misspell" -i \ "${WHITELIST_WORDS}" ${MISSPELL_ARGS} diff --git a/tools/proto_sync.py b/tools/proto_sync.py index 92b7a2cad7b8..f3520584fca0 100755 --- a/tools/proto_sync.py +++ b/tools/proto_sync.py @@ -120,7 +120,10 @@ def SyncV3Alpha(cmd, src_labels): continue # Skip unversioned package namespaces. TODO(htuch): fix this to use the type # DB and proper upgrade paths. - if 'v2' in dst: + if 'v1' in dst: + dst = re.sub('v1alpha\d?|v1', 'v3alpha', dst) + SyncProtoFile(cmd, src, dst) + elif 'v2' in dst: dst = re.sub('v2alpha\d?|v2', 'v3alpha', dst) SyncProtoFile(cmd, src, dst) elif 'envoy/type/matcher' in dst: @@ -149,8 +152,8 @@ def GetImportDeps(proto_path): # We can ignore imports provided implicitly by api_proto_package(). 
if any(import_path.startswith(p) for p in API_BUILD_SYSTEM_IMPORT_PREFIXES): continue - # Special case handling for in-built versioning annotations. - if import_path == 'udpa/annotations/versioning.proto': + # Special case handling for UDPA annotations. + if import_path.startswith('udpa/annotations/'): imports.append('@com_github_cncf_udpa//udpa/annotations:pkg') continue # Explicit remapping for external deps, compute paths for envoy/*. diff --git a/tools/protoxform/migrate.py b/tools/protoxform/migrate.py index 6713f969248a..71ddc210a8a2 100644 --- a/tools/protoxform/migrate.py +++ b/tools/protoxform/migrate.py @@ -8,6 +8,7 @@ from tools.protoxform import options from tools.protoxform import utils +from udpa.annotations import migrate_pb2 from google.api import annotations_pb2 ENVOY_COMMENT_WITH_TYPE_REGEX = re.compile('') @@ -72,6 +73,17 @@ def _Deprecate(self, proto, field_or_value): proto.reserved_name.append(field_or_value.name) options.AddHideOption(field_or_value.options) + def _Rename(self, proto, migrate_annotation): + """Rename a field/enum/service/message + + Args: + proto: DescriptorProto or corresponding proto message + migrate_annotation: udpa.annotations.MigrateAnnotation message + """ + if migrate_annotation.rename: + proto.name = migrate_annotation.rename + migrate_annotation.rename = "" + def VisitService(self, service_proto, type_context): upgraded_proto = copy.deepcopy(service_proto) for m in upgraded_proto.method: @@ -98,6 +110,8 @@ def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums): f.type_name = "" else: f.type_name = self._UpgradedType(f.type_name) + if f.options.HasExtension(migrate_pb2.field_migrate): + self._Rename(f, f.options.Extensions[migrate_pb2.field_migrate]) # Upgrade nested messages. 
del upgraded_proto.nested_type[:]
+ upgraded_proto.nested_type.extend(nested_msgs)
@@ -121,6 +135,11 @@ def VisitEnum(self, enum_proto, type_context):
 
   def VisitFile(self, file_proto, type_context, services, msgs, enums):
     upgraded_proto = copy.deepcopy(file_proto)
+    # Upgrade imports.
+    upgraded_proto.dependency[:] = [
+        dependency for dependency in upgraded_proto.dependency
+        if dependency not in ("udpa/annotations/migrate.proto",)
+    ]
     # Upgrade package.
     upgraded_proto.package = self._typedb.next_version_packages[upgraded_proto.package]
     upgraded_proto.name = self._typedb.next_version_proto_paths[upgraded_proto.name]
diff --git a/tools/protoxform/protoxform.py b/tools/protoxform/protoxform.py
index 8d489e3ea287..8319e8f57406 100755
--- a/tools/protoxform/protoxform.py
+++ b/tools/protoxform/protoxform.py
@@ -206,6 +206,8 @@ def CamelCase(s):
     elif d in ['udpa/annotations/versioning.proto']:
       # Skip, we decide to add this based on requires_versioning_import
       pass
+    elif d in ['udpa/annotations/migrate.proto']:
+      infra_imports.append(d)
     else:
       misc_imports.append(d)
diff --git a/tools/spelling_skip_files.txt b/tools/spelling_skip_files.txt
index 6ddcc10646b8..8351c72345a4 100644
--- a/tools/spelling_skip_files.txt
+++ b/tools/spelling_skip_files.txt
@@ -1 +1 @@
-OWNERS.md
+OWNERS.md
corpus
diff --git a/tools/type_whisperer/typedb_gen.py b/tools/type_whisperer/typedb_gen.py
index 0c3aee88aaa2..317912a2cae1 100644
--- a/tools/type_whisperer/typedb_gen.py
+++ b/tools/type_whisperer/typedb_gen.py
@@ -13,6 +13,7 @@
 # Regexes governing v3alpha upgrades. TODO(htuch): The regex approach will have
 # to be rethought as we go beyond v3alpha, this is WiP.
 TYPE_UPGRADE_REGEXES = [
+    (r'(envoy[\w\.]*\.)(v1alpha\d?|v1)', r'\1v3alpha'),
     (r'(envoy[\w\.]*\.)(v2alpha\d?|v2)', r'\1v3alpha'),
     # These are special cases, e.g. upgrading versionless packages.
     ('envoy\.type\.matcher', 'envoy.type.matcher.v3alpha'),
@@ -21,6 +22,7 @@
# As with TYPE_UPGRADE_REGEXES but for API .proto paths.
PATH_UPGRADE_REGEXES = [ + (r'(envoy/[\w/]*/)(v1alpha\d?|v1)', r'\1v3alpha'), (r'(envoy/[\w/]*/)(v2alpha\d?|v2)', r'\1v3alpha'), # These are special cases, e.g. upgrading versionless packages. ('envoy/type/matcher', 'envoy/type/matcher/v3alpha'),