diff --git a/.bazelrc b/.bazelrc index 4a9e2f9c319b..8685001b9b2c 100644 --- a/.bazelrc +++ b/.bazelrc @@ -47,6 +47,7 @@ build:sanitizer --test_tag_filters=-no_san # Common flags for Clang build:clang --action_env=BAZEL_COMPILER=clang +build:clang --action_env=CC=clang --action_env=CXX=clang++ build:clang --linkopt=-fuse-ld=lld # Flags for Clang + PCH @@ -79,11 +80,13 @@ build:clang-asan --linkopt -fuse-ld=lld build:clang-asan --linkopt --rtlib=compiler-rt build:clang-asan --linkopt --unwindlib=libgcc -# macOS ASAN/UBSAN +# macOS build:macos --cxxopt=-std=c++17 build:macos --action_env=PATH=/usr/bin:/bin:/opt/homebrew/bin:/usr/local/bin:/opt/local/bin build:macos --host_action_env=PATH=/usr/bin:/bin:/opt/homebrew/bin:/usr/local/bin:/opt/local/bin +build:macos --define tcmalloc=disabled +# macOS ASAN/UBSAN build:macos-asan --config=asan # Workaround, see https://github.com/bazelbuild/bazel/issues/6932 build:macos-asan --copt -Wno-macro-redefined diff --git a/api/bazel/external_deps.bzl b/api/bazel/external_deps.bzl index e8283e4fee10..d541512ce98b 100644 --- a/api/bazel/external_deps.bzl +++ b/api/bazel/external_deps.bzl @@ -71,7 +71,7 @@ USE_CATEGORIES = [ USE_CATEGORIES_WITH_CPE_OPTIONAL = ["build", "other", "test_only", "api"] def _fail_missing_attribute(attr, key): - fail("The '%s' attribute must be defined for external dependecy " % attr + key) + fail("The '%s' attribute must be defined for external dependency " % attr + key) # Method for verifying content of the repository location specifications. 
# diff --git a/api/bazel/repository_locations.bzl b/api/bazel/repository_locations.bzl index 0e5d85aa3318..3adc1245a879 100644 --- a/api/bazel/repository_locations.bzl +++ b/api/bazel/repository_locations.bzl @@ -14,9 +14,9 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "protoc-gen-validate (PGV)", project_desc = "protoc plugin to generate polyglot message validators", project_url = "https://github.com/envoyproxy/protoc-gen-validate", - version = "0.6.1", - sha256 = "c695fc5a2e5a1b52904cd8a58ce7a1c3a80f7f50719496fd606e551685c01101", - release_date = "2021-04-26", + version = "0.6.2", + sha256 = "b02da533c77023238c556982507b9a71afc850478b637a7a13ec13f311efa5c0", + release_date = "2021-10-21", strip_prefix = "protoc-gen-validate-{version}", urls = ["https://github.com/envoyproxy/protoc-gen-validate/archive/v{version}.tar.gz"], use_category = ["api"], diff --git a/api/envoy/config/cluster/v3/cluster.proto b/api/envoy/config/cluster/v3/cluster.proto index 34a8608e9e0e..ca54e6e841f3 100644 --- a/api/envoy/config/cluster/v3/cluster.proto +++ b/api/envoy/config/cluster/v3/cluster.proto @@ -112,9 +112,9 @@ message Cluster { // Use the new :ref:`load_balancing_policy // ` field to determine the LB policy. - // [#next-major-version: In the v3 API, we should consider deprecating the lb_policy field - // and instead using the new load_balancing_policy field as the one and only mechanism for - // configuring this.] + // This has been deprecated in favor of using the :ref:`load_balancing_policy + // ` field without + // setting any value in :ref:`lb_policy`. LOAD_BALANCING_POLICY_CONFIG = 7; } @@ -1044,9 +1044,8 @@ message Cluster { // servers of this cluster. repeated Filter filters = 40; - // New mechanism for LB policy configuration. Used only if the - // :ref:`lb_policy` field has the value - // :ref:`LOAD_BALANCING_POLICY_CONFIG`. + // If this field is set and is supported by the client, it will supersede the value of + // :ref:`lb_policy`. 
LoadBalancingPolicy load_balancing_policy = 41; // [#not-implemented-hide:] diff --git a/api/envoy/extensions/filters/network/meta_protocol_proxy/config/route/v1alpha/BUILD b/api/envoy/extensions/filters/network/meta_protocol_proxy/config/route/v1alpha/BUILD new file mode 100644 index 000000000000..79f89dd96bb5 --- /dev/null +++ b/api/envoy/extensions/filters/network/meta_protocol_proxy/config/route/v1alpha/BUILD @@ -0,0 +1,16 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "@envoy_api//envoy/config/core/v3:pkg", + "@envoy_api//envoy/config/route/v3:pkg", + "@envoy_api//envoy/type/matcher/v3:pkg", + "@envoy_api//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) + diff --git a/api/envoy/extensions/filters/network/meta_protocol_proxy/config/route/v1alpha/README.md b/api/envoy/extensions/filters/network/meta_protocol_proxy/config/route/v1alpha/README.md new file mode 100644 index 000000000000..f6d267746cf0 --- /dev/null +++ b/api/envoy/extensions/filters/network/meta_protocol_proxy/config/route/v1alpha/README.md @@ -0,0 +1 @@ +Protocol buffer definitions for the MetaProtocol route configuration. 
diff --git a/api/envoy/extensions/filters/network/meta_protocol_proxy/config/route/v1alpha/route.proto b/api/envoy/extensions/filters/network/meta_protocol_proxy/config/route/v1alpha/route.proto new file mode 100644 index 000000000000..a485ef4064ef --- /dev/null +++ b/api/envoy/extensions/filters/network/meta_protocol_proxy/config/route/v1alpha/route.proto @@ -0,0 +1,61 @@ +syntax = "proto3"; + +package aeraki.meta_protocol_proxy.config.route.v1alpha; + +import "envoy/config/route/v3/route_components.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.meta_protocol_proxy.config.route.v1alpha"; +option java_outer_classname = "MetaRouteConfigurationProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Meta Protocol Proxy Route Configuration] +// Meta Protocol proxy :ref:`configuration overview `. + +// [#next-free-field: 3] +message RouteConfiguration { + //The name of the route configuration. For example, it might match route_config_name in envoy.extensions.filters.network.meta_protocol_proxy.v1alpha.Rds. + string name = 1; + + // The list of routes that will be matched, in order, against incoming requests. The first route + // that matches will be used. + repeated Route routes = 2; +} + +message Route { + // Name for the route. + string name = 1; + + // Route matching parameters. + RouteMatch match = 2; + + // Route request to some upstream cluster. + RouteAction route = 3 [(validate.rules).message = {required: true}]; +} + +message RouteMatch { + // Specifies a set of key:value pairs in the metadata that the route should match on. The router will check the metadata + // populated by the codec plugin against all the specified key:value pairs in the route config.
A match will happen if all the + // key:value pairs in the route are present in the request metadata with the same values (or based on presence if + // the value field is not in the config). + repeated envoy.config.route.v3.HeaderMatcher metadata = 1; +} + +message RouteAction { + oneof cluster_specifier { + option (validate.required) = true; + + // Indicates the upstream cluster to which the request should be routed. + string cluster = 1; + + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. + // Currently ClusterWeight only supports the name and weight fields. + envoy.config.route.v3.WeightedCluster weighted_clusters = 2; + } +} + diff --git a/api/envoy/extensions/filters/network/meta_protocol_proxy/router/v1alpha/BUILD b/api/envoy/extensions/filters/network/meta_protocol_proxy/router/v1alpha/BUILD new file mode 100644 index 000000000000..ee92fb652582 --- /dev/null +++ b/api/envoy/extensions/filters/network/meta_protocol_proxy/router/v1alpha/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/extensions/filters/network/meta_protocol_proxy/router/v1alpha/README.md b/api/envoy/extensions/filters/network/meta_protocol_proxy/router/v1alpha/README.md new file mode 100644 index 000000000000..7becc375e449 --- /dev/null +++ b/api/envoy/extensions/filters/network/meta_protocol_proxy/router/v1alpha/README.md @@ -0,0 +1 @@ +Protocol buffer definitions for the MetaProtocol router. 
diff --git a/api/envoy/extensions/filters/network/meta_protocol_proxy/router/v1alpha/router.proto b/api/envoy/extensions/filters/network/meta_protocol_proxy/router/v1alpha/router.proto new file mode 100644 index 000000000000..3fad63066e6d --- /dev/null +++ b/api/envoy/extensions/filters/network/meta_protocol_proxy/router/v1alpha/router.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; + +package aeraki.meta_protocol_proxy.filters.router.v1alpha; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.meta_protocol_proxy.router.v1alpha"; +option java_outer_classname = "RouterProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Router] +// MetaProtocol router :ref:`configuration overview `. + +message Router { +} diff --git a/api/envoy/extensions/filters/network/meta_protocol_proxy/router/v3alpha/BUILD b/api/envoy/extensions/filters/network/meta_protocol_proxy/router/v3alpha/BUILD new file mode 100644 index 000000000000..ee92fb652582 --- /dev/null +++ b/api/envoy/extensions/filters/network/meta_protocol_proxy/router/v3alpha/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/extensions/filters/network/meta_protocol_proxy/router/v3alpha/router.proto b/api/envoy/extensions/filters/network/meta_protocol_proxy/router/v3alpha/router.proto new file mode 100644 index 000000000000..3fad63066e6d --- /dev/null +++ b/api/envoy/extensions/filters/network/meta_protocol_proxy/router/v3alpha/router.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; + +package aeraki.meta_protocol_proxy.filters.router.v3alpha; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.meta_protocol_proxy.router.v3alpha"; +option java_outer_classname = "RouterProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Router] +// MetaProtocol router :ref:`configuration overview `. + +message Router { +} diff --git a/api/envoy/extensions/filters/network/meta_protocol_proxy/v1alpha/BUILD b/api/envoy/extensions/filters/network/meta_protocol_proxy/v1alpha/BUILD new file mode 100644 index 000000000000..b75ce5ba2497 --- /dev/null +++ b/api/envoy/extensions/filters/network/meta_protocol_proxy/v1alpha/BUILD @@ -0,0 +1,17 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
+ +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//api/meta_protocol_proxy/config/route/v1alpha:pkg", + "@envoy_api//envoy/config/core/v3:pkg", + "@envoy_api//envoy/config/route/v3:pkg", + "@envoy_api//envoy/type/matcher/v3:pkg", + "@envoy_api//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) + diff --git a/api/envoy/extensions/filters/network/meta_protocol_proxy/v1alpha/README.md b/api/envoy/extensions/filters/network/meta_protocol_proxy/v1alpha/README.md new file mode 100644 index 000000000000..49e028dde4c5 --- /dev/null +++ b/api/envoy/extensions/filters/network/meta_protocol_proxy/v1alpha/README.md @@ -0,0 +1 @@ +Protocol buffer definitions for the MetaProtocol proxy. diff --git a/api/envoy/extensions/filters/network/meta_protocol_proxy/v1alpha/meta_protocol_proxy.proto b/api/envoy/extensions/filters/network/meta_protocol_proxy/v1alpha/meta_protocol_proxy.proto new file mode 100644 index 000000000000..3f0819e35f45 --- /dev/null +++ b/api/envoy/extensions/filters/network/meta_protocol_proxy/v1alpha/meta_protocol_proxy.proto @@ -0,0 +1,80 @@ +syntax = "proto3"; + +package aeraki.meta_protocol_proxy.v1alpha; + +import "envoy/config/core/v3/config_source.proto"; + +import "envoy/extensions/filters/network/meta_protocol_proxy/config/route/v1alpha/route.proto"; + +import "google/protobuf/any.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.meta_protocol_proxy.v1alpha"; +option java_outer_classname = "MetaProtocolProxyProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Meta Protocol proxy] +// Meta Protocol proxy :ref:`configuration overview `. 
+// [#extension: envoy.filters.network.meta_protocol_proxy] + +// [#next-free-field: 7] +message MetaProtocolProxy { + + // The human readable prefix to use when emitting statistics. + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; + + // The name of the application protocol built on top of meta protocol. + string application_protocol = 2 [(validate.rules).string = {min_len: 1}]; + + oneof route_specifier { + option (validate.required) = true; + + // The meta protocol proxy’s route table will be dynamically loaded via the RDS API. + Rds rds = 3; + + // The route table for the meta protocol proxy is static and is specified in this property. + config.route.v1alpha.RouteConfiguration route_config = 4; + } + + // The codec which encodes and decodes the application protocol. + Codec codec = 5; + + // A list of individual Layer-7 filters that make up the filter chain for requests made to + // the meta protocol proxy. Order matters as the filters are processed sequentially as + // request events happen. If no meta_protocol_filters are specified, a default router filter + // (`aeraki.meta_protocol.filters.router`) is used. + repeated MetaProtocolFilter meta_protocol_filters = 6; +} + +message Rds { + // Configuration source specifier for RDS. + envoy.config.core.v3.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; + + // The name of the route configuration. This name will be passed to the RDS + // API. This allows an Envoy configuration with multiple HTTP listeners (and + // associated HTTP connection manager filters) to use different route + // configurations. + string route_config_name = 2; +} + +// MetaProtocolFilter configures a MetaProtocol filter. +message MetaProtocolFilter { + // The name of the filter to instantiate. The name must match a supported filter. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // Filter specific configuration which depends on the filter being + // instantiated. 
See the supported filters for further documentation. + google.protobuf.Any config = 2; +} + +message Codec { + // The name of the codec configuration. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // Codec specific configuration which depends on the codec being instantiated. + google.protobuf.Any config = 2; +} + diff --git a/api/envoy/extensions/filters/network/meta_protocol_proxy/v3alpha/BUILD b/api/envoy/extensions/filters/network/meta_protocol_proxy/v3alpha/BUILD new file mode 100644 index 000000000000..c06bb7bc9a2f --- /dev/null +++ b/api/envoy/extensions/filters/network/meta_protocol_proxy/v3alpha/BUILD @@ -0,0 +1,15 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "@envoy_api//envoy/config/core/v3:pkg", + "@envoy_api//envoy/config/route/v3:pkg", + "@envoy_api//envoy/type/matcher/v3:pkg", + "@envoy_api//envoy/type/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/network/meta_protocol_proxy/v3alpha/meta_protocol_proxy.proto b/api/envoy/extensions/filters/network/meta_protocol_proxy/v3alpha/meta_protocol_proxy.proto new file mode 100644 index 000000000000..fc51f1060a2a --- /dev/null +++ b/api/envoy/extensions/filters/network/meta_protocol_proxy/v3alpha/meta_protocol_proxy.proto @@ -0,0 +1,78 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.meta_protocol_proxy.v3alpha; + +import "envoy/config/core/v3/config_source.proto"; + +import "envoy/extensions/filters/network/meta_protocol_proxy/v3alpha/route.proto"; + +import "google/protobuf/any.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.meta_protocol_proxy.v3alpha"; +option java_outer_classname = "MetaProtocolProxyProto"; +option 
java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Meta Protocol proxy] +// Meta Protocol proxy :ref:`configuration overview `. +// [#extension: envoy.filters.network.meta_protocol_proxy] + +// [#next-free-field: 7] +message MetaProtocolProxy { + // The human readable prefix to use when emitting statistics. + string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; + + // The name of the application protocol built on top of meta protocol. + string application_protocol = 2 [(validate.rules).string = {min_len: 1}]; + + oneof route_specifier { + option (validate.required) = true; + + // The meta protocol proxy’s route table will be dynamically loaded via the RDS API. + Rds rds = 3; + + // The route table for the meta protocol proxy is static and is specified in this property. + RouteConfiguration route_config = 4; + } + + // The codec which encodes and decodes the application protocol. + Codec codec = 5; + + // A list of individual Layer-7 filters that make up the filter chain for requests made to + // the meta protocol proxy. Order matters as the filters are processed sequentially as + // request events happen. If no meta_protocol_filters are specified, a default router filter + // (`aeraki.meta_protocol.filters.router`) is used. + repeated MetaProtocolFilter meta_protocol_filters = 6; +} + +message Rds { + // Configuration source specifier for RDS. + envoy.config.core.v3.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; + + // The name of the route configuration. This name will be passed to the RDS + // API. This allows an Envoy configuration with multiple HTTP listeners (and + // associated HTTP connection manager filters) to use different route + // configurations. + string route_config_name = 2; +} + +// MetaProtocolFilter configures a MetaProtocol filter. +message MetaProtocolFilter { + // The name of the filter to instantiate. 
The name must match a supported filter. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // Filter specific configuration which depends on the filter being + // instantiated. See the supported filters for further documentation. + google.protobuf.Any config = 2; +} + +message Codec { + // The name of the codec configuration. + string name = 1 [(validate.rules).string = {min_len: 1}]; + + // Codec specific configuration which depends on the codec being instantiated. + google.protobuf.Any config = 2; +} diff --git a/api/envoy/extensions/filters/network/meta_protocol_proxy/v3alpha/route.proto b/api/envoy/extensions/filters/network/meta_protocol_proxy/v3alpha/route.proto new file mode 100644 index 000000000000..b241d87dc945 --- /dev/null +++ b/api/envoy/extensions/filters/network/meta_protocol_proxy/v3alpha/route.proto @@ -0,0 +1,101 @@ +syntax = "proto3"; + +package envoy.extensions.filters.network.meta_protocol_proxy.v3alpha; + +import "envoy/config/core/v3/base.proto"; +import "envoy/config/route/v3/route_components.proto"; + +import "google/protobuf/wrappers.proto"; + +import "udpa/annotations/status.proto"; +import "validate/validate.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.network.meta_protocol_proxy.v3alpha"; +option java_outer_classname = "MetaRouteConfigurationProto"; +option java_multiple_files = true; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Meta Protocol Proxy Route Configuration] +// Meta Protocol proxy :ref:`configuration overview `. + +// [#next-free-field: 3] +message RouteConfiguration { + //The name of the route configuration. For example, it might match route_config_name in envoy.extensions.filters.network.meta_protocol_proxy.v1alpha.Rds. + string name = 1; + + // The list of routes that will be matched, in order, against incoming requests. The first route + // that matches will be used. 
+ repeated Route routes = 2; +} + +message RetryPolicy { + // When certain conditions are met, the upstream request is retried. Supports retry when a + // network error occurs or when the response status is a specific status. + string retry_on = 1; + + // Maximum number of retries. For a downstream request, envoy may issue `1 + max_retry` + // upstream requests. + uint32 max_retry = 2; + + // If not specified, then Route.timeout will be used. + google.protobuf.Duration per_try_timeout = 3; +} + +message Route { + // Name for the route. + string name = 1; + + // Route matching parameters. + RouteMatch match = 2; + + // Route request to some upstream cluster. + RouteAction route = 3 [(validate.rules).message = {required: true}]; + + // [#not-implemented-hide:] + // Downstream request timeout. + google.protobuf.Duration timeout = 4; + + // [#not-implemented-hide:] + // Retry policy used by current route. + RetryPolicy retry = 5; + + // Route level config for L7 generic filters. The key should always be the generic filter name. + map<string, google.protobuf.Any> per_filter_config = 6; + + // Route metadata. + config.core.v3.Metadata metadata = 7; +} + +message RouteMatch { + // Target service name of downstream request. + google.protobuf.StringValue service = 2; + + // Used to match request path of downstream request. If request path does not match + // this rule, the entire request does not match the current route. + type.matcher.v3.StringMatcher path = 3; + + // Used to match request method of downstream request. If request method does not match + // this rule, the entire request does not match the current route. + type.matcher.v3.StringMatcher method = 4; + + // Specifies a set of key:value pairs in the metadata that the route should match on. The router will check the metadata + // populated by the codec plugin against all the specified key:value pairs in the route config. 
A match will happen if all the + // key:value pairs in the route are present in the request metadata with the same values (or based on presence if + // the value field is not in the config). + repeated envoy.config.route.v3.HeaderMatcher metadata = 1; +} + +message RouteAction { + oneof cluster_specifier { + option (validate.required) = true; + + // Indicates the upstream cluster to which the request should be routed. + string cluster = 1; + + // Multiple upstream clusters can be specified for a given route. The + // request is routed to one of the upstream clusters based on weights + // assigned to each cluster. + // Currently ClusterWeight only supports the name and weight fields. + envoy.config.route.v3.WeightedCluster weighted_clusters = 2; + } +} diff --git a/bazel/EXTERNAL_DEPS.md b/bazel/EXTERNAL_DEPS.md index 9820ff4cf993..0e1cdb0e2744 100644 --- a/bazel/EXTERNAL_DEPS.md +++ b/bazel/EXTERNAL_DEPS.md @@ -25,7 +25,7 @@ This is the preferred style of adding dependencies that use CMake for their buil 1. Define a the source Bazel repository in [`bazel/repositories.bzl`](repositories.bzl), in the `envoy_dependencies()` function. -2. Add a `cmake_external` rule to [`bazel/foreign_cc/BUILD`](foreign_cc/BUILD). This will reference +2. Add an `envoy_cmake` rule to [`bazel/foreign_cc/BUILD`](foreign_cc/BUILD). This will reference the source repository in step 1. 3. Reference your new external dependency in some `envoy_cc_library` via the name bound in step 1 `external_deps` attribute. 
diff --git a/bazel/external/quiche.BUILD b/bazel/external/quiche.BUILD index 4ad3433fb646..18b1c2275e21 100644 --- a/bazel/external/quiche.BUILD +++ b/bazel/external/quiche.BUILD @@ -144,7 +144,10 @@ envoy_cc_library( hdrs = ["quiche/http2/http2_constants.h"], copts = quiche_copts, repository = "@envoy", - deps = [":http2_platform"], + deps = [ + ":http2_platform", + ":quiche_common_text_utils_lib", + ], ) envoy_cc_library( @@ -837,6 +840,7 @@ envoy_cc_library( deps = [ ":quiche_common_lib", ":quiche_common_platform", + ":quiche_common_text_utils_lib", ":spdy_core_header_storage_lib", ], ) @@ -1931,6 +1935,7 @@ envoy_cc_library( deps = [ ":quic_core_clock_lib", ":quic_core_crypto_certificate_view_lib", + ":quic_core_crypto_client_proof_source_lib", ":quic_core_crypto_encryption_lib", ":quic_core_crypto_hkdf_lib", ":quic_core_crypto_proof_source_lib", @@ -2077,6 +2082,7 @@ envoy_cc_library( tags = ["nofips"], visibility = ["//visibility:public"], deps = [ + ":quic_core_crypto_certificate_view_lib", ":quic_core_packets_lib", ":quic_core_versions_lib", ":quic_platform_base", @@ -2084,6 +2090,24 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "quic_core_crypto_client_proof_source_lib", + srcs = [ + "quiche/quic/core/crypto/client_proof_source.cc", + ], + hdrs = [ + "quiche/quic/core/crypto/client_proof_source.h", + ], + copts = quiche_copts, + repository = "@envoy", + tags = ["nofips"], + visibility = ["//visibility:public"], + deps = [ + ":quic_core_crypto_proof_source_lib", + ":quic_platform_base", + ], +) + envoy_cc_library( name = "quic_core_crypto_random_lib", srcs = ["quiche/quic/core/crypto/quic_random.cc"], diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 999a775c38bf..0ef5d8407dac 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -698,11 +698,12 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Packaging rules for Bazel", project_desc = "Bazel rules for the packaging distributions", 
project_url = "https://github.com/bazelbuild/rules_pkg", - version = "0.5.1", - sha256 = "a89e203d3cf264e564fcb96b6e06dd70bc0557356eb48400ce4b5d97c2c3720d", - urls = ["https://github.com/bazelbuild/rules_pkg/releases/download/{version}/rules_pkg-{version}.tar.gz"], + version = "ad57589abb069baa48f982778de408ea02d714fd", + sha256 = "ec14799a45f1d3b6c3e61c4d04513001bddac9208f09077b1f8c91ab47d234d2", + strip_prefix = "rules_pkg-{version}/pkg", + urls = ["https://github.com/bazelbuild/rules_pkg/archive/{version}.tar.gz"], use_category = ["build"], - release_date = "2021-08-18", + release_date = "2021-10-22", ), six = dict( project_name = "Six", @@ -835,12 +836,12 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "QUICHE", project_desc = "QUICHE (QUIC, HTTP/2, Etc) is Google‘s implementation of QUIC and related protocols", project_url = "https://github.com/google/quiche", - version = "b6e4ee8a1617031b04c8d5f4773469c742d3aaa1", - sha256 = "3947598748ab0034fe44f18a1fdda89d4418f9a64bcf583acbb5c25252a391a5", + version = "4c6ad6445246da3c6d3e7db920003321880048f8", + sha256 = "2a9823044b97b6055c2e4d84f6bdff5c4f66b9f18333ff58e270d23091a2b4ca", urls = ["https://github.com/google/quiche/archive/{version}.tar.gz"], strip_prefix = "quiche-{version}", use_category = ["dataplane_core"], - release_date = "2021-10-18", + release_date = "2021-10-26", cpe = "N/A", ), com_googlesource_googleurl = dict( diff --git a/ci/filter_example_setup.sh b/ci/filter_example_setup.sh index 5ef74fa49119..2447a79e41d4 100644 --- a/ci/filter_example_setup.sh +++ b/ci/filter_example_setup.sh @@ -25,6 +25,8 @@ sed -e "s|{ENVOY_SRCDIR}|${ENVOY_SRCDIR}|" "${ENVOY_SRCDIR}"/ci/WORKSPACE.filter mkdir -p "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/bazel ln -sf "${ENVOY_SRCDIR}"/bazel/get_workspace_status "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/bazel/ cp -f "${ENVOY_SRCDIR}"/.bazelrc "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/ +rm -f "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/.bazelversion +cp -f 
"${ENVOY_SRCDIR}"/.bazelversion "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/ cp -f "$(bazel info workspace)"/*.bazelrc "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/ export FILTER_WORKSPACE_SET=1 diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 6b6d65be13c7..f714f6e5cc5a 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -32,6 +32,7 @@ Removed Config or Runtime * http: removed ``envoy.reloadable_features.add_and_validate_scheme_header`` and legacy code paths. * http: removed ``envoy.reloadable_features.check_unsupported_typed_per_filter_config``, Envoy will always check unsupported typed per filter config if the filter isn't optional. * http: removed ``envoy.reloadable_features.dont_add_content_length_for_bodiless_requests deprecation`` and legacy code paths. +* http: removed ``envoy.reloadable_features.grpc_json_transcoder_adhere_to_buffer_limits`` and legacy code paths. * http: removed ``envoy.reloadable_features.http2_skip_encoding_empty_trailers`` and legacy code paths. Envoy will always encode empty trailers by sending empty data with ``end_stream`` true (instead of sending empty trailers) for HTTP/2. * http: removed ``envoy.reloadable_features.improved_stream_limit_handling`` and legacy code paths. * http: removed ``envoy.reloadable_features.remove_forked_chromium_url`` and legacy code paths. diff --git a/envoy/http/codec.h b/envoy/http/codec.h index a1d5fc829ae0..9a336b267fd6 100644 --- a/envoy/http/codec.h +++ b/envoy/http/codec.h @@ -394,6 +394,14 @@ class ConnectionCallbacks { * @param ReceivedSettings the settings received from the peer. */ virtual void onSettings(ReceivedSettings& settings) { UNREFERENCED_PARAMETER(settings); } + + /** + * Fires when the MAX_STREAMS frame is received from the peer. + * This is an HTTP/3 frame, indicating the new maximum stream ID which can be opened. + * This may occur multiple times across the lifetime of an HTTP/3 connection. 
+ * @param num_streams the number of streams now allowed to be opened. + */ + virtual void onMaxStreamsChanged(uint32_t num_streams) { UNREFERENCED_PARAMETER(num_streams); } }; /** diff --git a/envoy/network/BUILD b/envoy/network/BUILD index 6ff54a4c4c38..3a292bdd6cd7 100644 --- a/envoy/network/BUILD +++ b/envoy/network/BUILD @@ -63,6 +63,15 @@ envoy_cc_library( deps = ["//envoy/network:address_interface"], ) +envoy_cc_library( + name = "dns_resolver_interface", + hdrs = ["dns_resolver.h"], + deps = [ + "//envoy/api:api_interface", + "//source/common/config:utility_lib", + ], +) + envoy_cc_library( name = "drain_decision_interface", hdrs = ["drain_decision.h"], diff --git a/envoy/network/dns_resolver.h b/envoy/network/dns_resolver.h new file mode 100644 index 000000000000..2a326f152fe8 --- /dev/null +++ b/envoy/network/dns_resolver.h @@ -0,0 +1,32 @@ +#pragma once + +#include "envoy/api/api.h" +#include "envoy/event/dispatcher.h" +#include "envoy/network/dns.h" + +#include "source/common/config/utility.h" + +namespace Envoy { +namespace Network { + +constexpr absl::string_view CaresDnsResolver = "envoy.network.dns_resolver.cares"; +constexpr absl::string_view AppleDnsResolver = "envoy.network.dns_resolver.apple"; +constexpr absl::string_view DnsResolverCategory = "envoy.network.dns_resolver"; + +class DnsResolverFactory : public Config::TypedFactory { +public: + /* + * @returns a DnsResolver object. 
+ * @param dispatcher: the local dispatcher thread + * @param api: API interface to interact with system resources + * @param typed_dns_resolver_config: the typed DNS resolver config + */ + virtual DnsResolverSharedPtr createDnsResolver( + Event::Dispatcher& dispatcher, Api::Api& api, + const envoy::config::core::v3::TypedExtensionConfig& typed_dns_resolver_config) const PURE; + + std::string category() const override { return std::string(DnsResolverCategory); } +}; + +} // namespace Network +} // namespace Envoy diff --git a/envoy/upstream/load_balancer.h b/envoy/upstream/load_balancer.h index eea3f5888c8b..75252fae82d6 100644 --- a/envoy/upstream/load_balancer.h +++ b/envoy/upstream/load_balancer.h @@ -141,7 +141,7 @@ class LoadBalancer { virtual HostConstSharedPtr peekAnotherHost(LoadBalancerContext* context) PURE; /** - * Returns connnection lifetime callbacks that may be used to inform the load balancer of + * Returns connection lifetime callbacks that may be used to inform the load balancer of * connection events. Load balancers which do not intend to track connection lifetime events * will return nullopt. * @return optional lifetime callbacks for this load balancer. diff --git a/repokitteh.star b/repokitteh.star index 57c90e3599eb..13810d16f444 100644 --- a/repokitteh.star +++ b/repokitteh.star @@ -18,14 +18,14 @@ use( }, { "owner": "envoyproxy/api-shepherds!", - "path": "api/envoy/", + "path": "(api/envoy/|docs/root/api-docs/)", "label": "api", "github_status_label": "any API change", "auto_assign": True, }, { "owner": "envoyproxy/api-watchers", - "path": "api/envoy/", + "path": "(api/envoy/|docs/root/api-docs/)", }, { "owner": "envoyproxy/dependency-shepherds!", diff --git a/source/common/buffer/buffer_impl.h b/source/common/buffer/buffer_impl.h index 12152a927302..7368b4880e1b 100644 --- a/source/common/buffer/buffer_impl.h +++ b/source/common/buffer/buffer_impl.h @@ -397,7 +397,7 @@ class Slice { /** Length of the byte array that base_ points to. 
This is also the offset in bytes from the start * of the slice to the end of the Reservable section. */ - uint64_t capacity_; + uint64_t capacity_ = 0; /** Backing storage for mutable slices which own their own storage. This storage should never be * accessed directly; access base_ instead. */ @@ -407,11 +407,11 @@ class Slice { uint8_t* base_{nullptr}; /** Offset in bytes from the start of the slice to the start of the Data section. */ - uint64_t data_; + uint64_t data_ = 0; /** Offset in bytes from the start of the slice to the start of the Reservable section which is * also the end of the Data section. */ - uint64_t reservable_; + uint64_t reservable_ = 0; /** Hooks to execute when the slice is destroyed. */ std::list> drain_trackers_; diff --git a/source/common/conn_pool/conn_pool_base.cc b/source/common/conn_pool/conn_pool_base.cc index 4cd623df5061..096f27a20683 100644 --- a/source/common/conn_pool/conn_pool_base.cc +++ b/source/common/conn_pool/conn_pool_base.cc @@ -175,18 +175,23 @@ void ConnPoolImplBase::attachStreamToClient(Envoy::ConnectionPool::ActiveClient& } ENVOY_CONN_LOG(debug, "creating stream", client); + // Latch capacity before updating remaining streams. + uint64_t capacity = client.currentUnusedCapacity(); client.remaining_streams_--; if (client.remaining_streams_ == 0) { ENVOY_CONN_LOG(debug, "maximum streams per connection, DRAINING", client); host_->cluster().stats().upstream_cx_max_requests_.inc(); transitionActiveClientState(client, Envoy::ConnectionPool::ActiveClient::State::DRAINING); - } else if (client.numActiveStreams() + 1 >= client.concurrent_stream_limit_) { + } else if (capacity == 1) { // As soon as the new stream is created, the client will be maxed out. transitionActiveClientState(client, Envoy::ConnectionPool::ActiveClient::State::BUSY); } // Decrement the capacity, as there's one less stream available for serving. 
- state_.decrConnectingAndConnectedStreamCapacity(1); + // For HTTP/3, the capacity is updated in newStreamEncoder. + if (trackStreamCapacity()) { + state_.decrConnectingAndConnectedStreamCapacity(1); + } // Track the new active stream. state_.incrActiveStreams(1); num_active_streams_++; @@ -213,14 +218,17 @@ void ConnPoolImplBase::onStreamClosed(Envoy::ConnectionPool::ActiveClient& clien // If the effective client capacity was limited by concurrency, increase connecting capacity. // If the effective client capacity was limited by max total streams, this will not result in an // increment as no capacity is freed up. - if (client.remaining_streams_ > client.concurrent_stream_limit_ - client.numActiveStreams() - 1 || - had_negative_capacity) { + // We don't update the capacity for HTTP/3 as the stream count should only + // increase when a MAX_STREAMS frame is received. + if (trackStreamCapacity() && (client.remaining_streams_ > client.concurrent_stream_limit_ - + client.numActiveStreams() - 1 || + had_negative_capacity)) { state_.incrConnectingAndConnectedStreamCapacity(1); } if (client.state() == ActiveClient::State::DRAINING && client.numActiveStreams() == 0) { // Close out the draining client if we no longer have active streams. 
client.close(); - } else if (client.state() == ActiveClient::State::BUSY) { + } else if (client.state() == ActiveClient::State::BUSY && client.currentUnusedCapacity() != 0) { transitionActiveClientState(client, ActiveClient::State::READY); if (!delay_attaching_stream) { onUpstreamReady(); @@ -296,6 +304,9 @@ void ConnPoolImplBase::onUpstreamReady() { state_.decrPendingStreams(1); pending_streams_.pop_back(); } + if (!pending_streams_.empty()) { + tryCreateNewConnections(); + } } std::list& ConnPoolImplBase::owningList(ActiveClient::State state) { diff --git a/source/common/conn_pool/conn_pool_base.h b/source/common/conn_pool/conn_pool_base.h index 0c47346f5725..29a714410f8c 100644 --- a/source/common/conn_pool/conn_pool_base.h +++ b/source/common/conn_pool/conn_pool_base.h @@ -56,7 +56,7 @@ class ActiveClient : public LinkedObject, // Returns the application protocol, or absl::nullopt for TCP. virtual absl::optional protocol() const PURE; - int64_t currentUnusedCapacity() const { + virtual int64_t currentUnusedCapacity() const { int64_t remaining_concurrent_streams = static_cast(concurrent_stream_limit_) - numActiveStreams(); @@ -102,6 +102,11 @@ class ActiveClient : public LinkedObject, virtual void drain(); ConnPoolImplBase& parent_; + // The count of remaining streams allowed for this connection. + // This will start out as the total number of streams per connection if capped + // by configuration, or it will be set to std::numeric_limits::max() to be + // (functionally) unlimited. + // TODO: this could be moved to an optional to make it actually unlimited. uint32_t remaining_streams_; uint32_t concurrent_stream_limit_; Upstream::HostDescriptionConstSharedPtr real_host_description_; @@ -148,6 +153,10 @@ class ConnPoolImplBase : protected Logger::Loggable { virtual ~ConnPoolImplBase(); void deleteIsPendingImpl(); + // By default, the connection pool will track connected and connecting stream + // capacity as streams are created and destroyed. 
QUIC does custom stream + // accounting so will override this to false. + virtual bool trackStreamCapacity() { return true; } // A helper function to get the specific context type from the base class context. template T& typedContext(AttachContext& context) { @@ -234,6 +243,9 @@ class ConnPoolImplBase : protected Logger::Loggable { void decrClusterStreamCapacity(uint32_t delta) { state_.decrConnectingAndConnectedStreamCapacity(delta); } + void incrClusterStreamCapacity(uint32_t delta) { + state_.incrConnectingAndConnectedStreamCapacity(delta); + } void dumpState(std::ostream& os, int indent_level = 0) const { const char* spaces = spacesForLevel(indent_level); os << spaces << "ConnPoolImplBase " << this << DUMP_MEMBER(ready_clients_.size()) @@ -255,6 +267,9 @@ class ConnPoolImplBase : protected Logger::Loggable { connecting_stream_capacity_ -= delta; } + // Called when an upstream is ready to serve pending streams. + void onUpstreamReady(); + protected: virtual void onConnected(Envoy::ConnectionPool::ActiveClient&) {} @@ -265,7 +280,6 @@ class ConnPoolImplBase : protected Logger::Loggable { NoConnectionRateLimited, CreatedButRateLimited, }; - // Creates up to 3 connections, based on the preconnect ratio. // Returns the ConnectionResult of the last attempt. ConnectionResult tryCreateNewConnections(); @@ -342,7 +356,6 @@ class ConnPoolImplBase : protected Logger::Loggable { // True iff this object is in the deferred delete list. 
bool deferred_deleting_{false}; - void onUpstreamReady(); Event::SchedulableCallbackPtr upstream_ready_cb_; }; diff --git a/source/common/event/BUILD b/source/common/event/BUILD index 42f6547a67e4..22efdc958ac2 100644 --- a/source/common/event/BUILD +++ b/source/common/event/BUILD @@ -43,7 +43,6 @@ envoy_cc_library( "//envoy/network:listener_interface", "//source/common/common:assert_lib", "//source/common/common:thread_lib", - "//source/common/config:utility_lib", "//source/common/filesystem:watcher_lib", "//source/common/network:connection_lib", "//source/common/network:listener_lib", diff --git a/source/common/event/file_event_impl.cc b/source/common/event/file_event_impl.cc index 5b83e694d548..4c68e6193fc4 100644 --- a/source/common/event/file_event_impl.cc +++ b/source/common/event/file_event_impl.cc @@ -55,6 +55,9 @@ void FileEventImpl::activate(uint32_t events) { void FileEventImpl::assignEvents(uint32_t events, event_base* base) { ASSERT(dispatcher_.isThreadSafe()); ASSERT(base != nullptr); + // TODO(antoniovicente) remove this once ConnectionImpl can + // handle Read and Close events delivered together. 
+ ASSERT(!((events & FileReadyType::Read) && (events & FileReadyType::Closed))); enabled_events_ = events; event_assign( &raw_event_, base, fd_, @@ -120,7 +123,6 @@ void FileEventImpl::unregisterEventIfEmulatedEdge(uint32_t event) { ASSERT(dispatcher_.isThreadSafe()); // This constexpr if allows the compiler to optimize away the function on POSIX if constexpr (PlatformDefaultTriggerType == FileTriggerType::EmulatedEdge) { - ASSERT((event & (FileReadyType::Read | FileReadyType::Write)) == event); if (trigger_ == FileTriggerType::EmulatedEdge) { auto new_event_mask = enabled_events_ & ~event; updateEvents(new_event_mask); @@ -156,7 +158,6 @@ void FileEventImpl::mergeInjectedEventsAndRunCb(uint32_t events) { injected_activation_events_ = injected_activation_events_ & ~FileReadyType::Read; } } - events |= injected_activation_events_; injected_activation_events_ = 0; activation_cb_->cancel(); diff --git a/source/common/http/codec_client.h b/source/common/http/codec_client.h index 684c962380f3..093e28304402 100644 --- a/source/common/http/codec_client.h +++ b/source/common/http/codec_client.h @@ -158,6 +158,11 @@ class CodecClient : protected Logger::Loggable, codec_callbacks_->onSettings(settings); } } + void onMaxStreamsChanged(uint32_t num_streams) override { + if (codec_callbacks_) { + codec_callbacks_->onMaxStreamsChanged(num_streams); + } + } void onIdleTimeout() { host_->cluster().stats().upstream_cx_idle_timeout_.inc(); diff --git a/source/common/http/http3/conn_pool.cc b/source/common/http/http3/conn_pool.cc index fbc3e4503607..c4a53797c747 100644 --- a/source/common/http/http3/conn_pool.cc +++ b/source/common/http/http3/conn_pool.cc @@ -30,17 +30,21 @@ ActiveClient::ActiveClient(Envoy::Http::HttpConnPoolImplBase& parent, parent.host()->cluster().stats().upstream_cx_http3_total_, data) { } +void ActiveClient::onMaxStreamsChanged(uint32_t num_streams) { + updateCapacity(num_streams); + if (state() == ActiveClient::State::BUSY && currentUnusedCapacity() != 0) { + 
parent_.transitionActiveClientState(*this, ActiveClient::State::READY); + // If there's waiting streams, make sure the pool will now serve them. + parent_.onUpstreamReady(); + } +} + void Http3ConnPoolImpl::setQuicConfigFromClusterConfig(const Upstream::ClusterInfo& cluster, quic::QuicConfig& quic_config) { - // TODO(alyssawilk) use and test other defaults. + Quic::convertQuicConfig(cluster.http3Options().quic_protocol_options(), quic_config); quic::QuicTime::Delta crypto_timeout = quic::QuicTime::Delta::FromMilliseconds(cluster.connectTimeout().count()); quic_config.set_max_time_before_crypto_handshake(crypto_timeout); - int32_t max_streams = getMaxStreams(cluster); - quic_config.SetMaxBidirectionalStreamsToSend(max_streams); - quic_config.SetMaxUnidirectionalStreamsToSend(max_streams); - Quic::configQuicInitialFlowControlWindow(cluster.http3Options().quic_protocol_options(), - quic_config); } Http3ConnPoolImpl::Http3ConnPoolImpl( diff --git a/source/common/http/http3/conn_pool.h b/source/common/http/http3/conn_pool.h index 6ae87420207e..2db0426630d6 100644 --- a/source/common/http/http3/conn_pool.h +++ b/source/common/http/http3/conn_pool.h @@ -23,6 +23,65 @@ class ActiveClient : public MultiplexedActiveClientBase { public: ActiveClient(Envoy::Http::HttpConnPoolImplBase& parent, Upstream::Host::CreateConnectionData& data); + + // Http::ConnectionCallbacks + void onMaxStreamsChanged(uint32_t num_streams) override; + + RequestEncoder& newStreamEncoder(ResponseDecoder& response_decoder) override { + ASSERT(quiche_capacity_ != 0); + // Each time a quic stream is allocated the quic capacity needs to get + // decremented. See comments by quiche_capacity_. 
+ updateCapacity(quiche_capacity_ - 1); + return MultiplexedActiveClientBase::newStreamEncoder(response_decoder); + } + + // Overload the default capacity calculations to return the quic capacity + // (modified by any stream limits in Envoy config) + int64_t currentUnusedCapacity() const override { + return std::min(quiche_capacity_, effectiveConcurrentStreamLimit()); + } + + void updateCapacity(uint64_t new_quiche_capacity) { + // Each time we update the capacity make sure to reflect the update in the + // connection pool. + // + // Due to interplay between the max number of concurrent streams Envoy will + // allow and the max number of streams per connection this is not as simple + // as just updating based on the delta between quiche_capacity_ and + // new_quiche_capacity, so we use the delta between the actual calculated + // capacity before and after the update. + uint64_t old_capacity = currentUnusedCapacity(); + quiche_capacity_ = new_quiche_capacity; + uint64_t new_capacity = currentUnusedCapacity(); + + if (new_capacity < old_capacity) { + parent_.decrClusterStreamCapacity(old_capacity - new_capacity); + } else if (old_capacity < new_capacity) { + parent_.incrClusterStreamCapacity(new_capacity - old_capacity); + } + } + + // Unlike HTTP/2 and HTTP/1, rather than having a cap on the number of active + // streams, QUIC has a fixed number of streams available which is updated via + // the MAX_STREAMS frame. + // + // As such each time we create a new stream for QUIC, the capacity goes down + // by one, but unlike the other two codecs it is _not_ restored on stream + // closure. + // + // We track the QUIC capacity here, and overload currentUnusedCapacity so the + // connection pool can accurately keep track of when it is safe to create new + // streams. 
+ // + // Though HTTP/3 should arguably start out with 0 stream capacity until the + // initial handshake is complete and MAX_STREAMS frame has been received, + // assume optimistically it will get ~100 streams, so that the connection pool + // won't fetch a connection for each incoming stream but will assume that the + // first connection will likely be able to serve 100. + // This number will be updated to the correct value before the connection is + // deemed connected, at which point further connections will be established if + // necessary. + uint64_t quiche_capacity_ = 100; }; // Http3 subclass of FixedHttpConnPoolImpl which exists to store quic data. @@ -45,6 +104,9 @@ class Http3ConnPoolImpl : public FixedHttpConnPoolImpl { quic::QuicConfig& quic_config); Quic::PersistentQuicInfoImpl& quicInfo() { return *quic_info_; } + // For HTTP/3 the base connection pool does not track stream capacity, rather + // the HTTP3 active client does. + bool trackStreamCapacity() override { return false; } private: // Store quic helpers which can be shared between connections and must live diff --git a/source/common/network/dns_resolver/BUILD b/source/common/network/dns_resolver/BUILD index 10a556d92454..1c1284e246d3 100644 --- a/source/common/network/dns_resolver/BUILD +++ b/source/common/network/dns_resolver/BUILD @@ -9,12 +9,11 @@ licenses(["notice"]) # Apache 2 envoy_package() envoy_cc_library( - name = "dns_factory_lib", - srcs = ["dns_factory.cc"], - hdrs = ["dns_factory.h"], + name = "dns_factory_util_lib", + srcs = ["dns_factory_util.cc"], + hdrs = ["dns_factory_util.h"], deps = [ - "//envoy/api:api_interface", - "//source/common/config:utility_lib", + "//envoy/network:dns_resolver_interface", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", "@envoy_api//envoy/extensions/common/dynamic_forward_proxy/v3:pkg_cc_proto", diff --git a/source/common/network/dns_resolver/dns_factory.cc 
b/source/common/network/dns_resolver/dns_factory_util.cc similarity index 98% rename from source/common/network/dns_resolver/dns_factory.cc rename to source/common/network/dns_resolver/dns_factory_util.cc index 4df5ea8e4392..d8d1712cab95 100644 --- a/source/common/network/dns_resolver/dns_factory.cc +++ b/source/common/network/dns_resolver/dns_factory_util.cc @@ -1,4 +1,4 @@ -#include "source/common/network/dns_resolver/dns_factory.h" +#include "source/common/network/dns_resolver/dns_factory_util.h" namespace Envoy { namespace Network { diff --git a/source/common/network/dns_resolver/dns_factory.h b/source/common/network/dns_resolver/dns_factory_util.h similarity index 86% rename from source/common/network/dns_resolver/dns_factory.h rename to source/common/network/dns_resolver/dns_factory_util.h index b1a8bf5fb404..6eb1c0de0ac1 100644 --- a/source/common/network/dns_resolver/dns_factory.h +++ b/source/common/network/dns_resolver/dns_factory_util.h @@ -1,40 +1,18 @@ #pragma once -#include "envoy/api/api.h" #include "envoy/config/bootstrap/v3/bootstrap.pb.h" #include "envoy/config/cluster/v3/cluster.pb.h" -#include "envoy/event/dispatcher.h" #include "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.pb.h" #include "envoy/extensions/filters/udp/dns_filter/v3/dns_filter.pb.h" #include "envoy/extensions/network/dns_resolver/apple/v3/apple_dns_resolver.pb.h" #include "envoy/extensions/network/dns_resolver/cares/v3/cares_dns_resolver.pb.h" -#include "envoy/network/dns.h" +#include "envoy/network/dns_resolver.h" -#include "source/common/config/utility.h" #include "source/common/runtime/runtime_features.h" namespace Envoy { namespace Network { -constexpr absl::string_view CaresDnsResolver = "envoy.network.dns_resolver.cares"; -constexpr absl::string_view AppleDnsResolver = "envoy.network.dns_resolver.apple"; -constexpr absl::string_view DnsResolverCategory = "envoy.network.dns_resolver"; - -class DnsResolverFactory : public Config::TypedFactory { -public: - /** - 
* @returns a DnsResolver object. - * @param dispatcher: the local dispatcher thread - * @param api: API interface to interact with system resources - * @param typed_dns_resolver_config: the typed DNS resolver config - */ - virtual DnsResolverSharedPtr createDnsResolver( - Event::Dispatcher& dispatcher, Api::Api& api, - const envoy::config::core::v3::TypedExtensionConfig& typed_dns_resolver_config) const PURE; - - std::string category() const override { return std::string(DnsResolverCategory); } -}; - // Create a default c-ares DNS resolver typed config. void makeDefaultCaresDnsResolverConfig( envoy::config::core::v3::TypedExtensionConfig& typed_dns_resolver_config); diff --git a/source/common/quic/active_quic_listener.cc b/source/common/quic/active_quic_listener.cc index 7e3d761deded..5aa56cada9b6 100644 --- a/source/common/quic/active_quic_listener.cc +++ b/source/common/quic/active_quic_listener.cc @@ -254,11 +254,7 @@ ActiveQuicListenerFactory::ActiveQuicListenerFactory( : 20000; quic_config_.set_max_time_before_crypto_handshake( quic::QuicTime::Delta::FromMilliseconds(max_time_before_crypto_handshake_ms)); - int32_t max_streams = - PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.quic_protocol_options(), max_concurrent_streams, 100); - quic_config_.SetMaxBidirectionalStreamsToSend(max_streams); - quic_config_.SetMaxUnidirectionalStreamsToSend(max_streams); - configQuicInitialFlowControlWindow(config.quic_protocol_options(), quic_config_); + convertQuicConfig(config.quic_protocol_options(), quic_config_); // Initialize crypto stream factory. 
envoy::config::core::v3::TypedExtensionConfig crypto_stream_config; diff --git a/source/common/quic/envoy_quic_client_session.cc b/source/common/quic/envoy_quic_client_session.cc index 6879f5aec326..4cf14c31db3b 100644 --- a/source/common/quic/envoy_quic_client_session.cc +++ b/source/common/quic/envoy_quic_client_session.cc @@ -5,6 +5,21 @@ #include "quic_filter_manager_connection_impl.h" +namespace quic { +namespace test { + +// TODO(alyssawilk) add the necessary accessors to quiche and remove this. +class QuicSessionPeer { +public: + static quic::QuicStreamIdManager& + getStreamIdManager(Envoy::Quic::EnvoyQuicClientSession* session) { + return session->ietf_streamid_manager_.bidirectional_stream_id_manager_; + } +}; + +} // namespace test +} // namespace quic + namespace Envoy { namespace Quic { @@ -19,9 +34,8 @@ EnvoyQuicClientSession::EnvoyQuicClientSession( send_buffer_limit), quic::QuicSpdyClientSession(config, supported_versions, connection.release(), server_id, crypto_config.get(), push_promise_index), - host_name_(server_id.host()), crypto_config_(crypto_config), - crypto_stream_factory_(crypto_stream_factory), quic_stat_names_(quic_stat_names), - scope_(scope) { + crypto_config_(crypto_config), crypto_stream_factory_(crypto_stream_factory), + quic_stat_names_(quic_stat_names), scope_(scope) { quic_ssl_info_ = std::make_shared(*this); } @@ -30,7 +44,7 @@ EnvoyQuicClientSession::~EnvoyQuicClientSession() { network_connection_ = nullptr; } -absl::string_view EnvoyQuicClientSession::requestedServerName() const { return host_name_; } +absl::string_view EnvoyQuicClientSession::requestedServerName() const { return server_id().host(); } void EnvoyQuicClientSession::connect() { dynamic_cast(network_connection_) @@ -82,6 +96,16 @@ void EnvoyQuicClientSession::OnRstStream(const quic::QuicRstStreamFrame& frame) /*from_self*/ false, /*is_upstream*/ true); } +void EnvoyQuicClientSession::OnCanCreateNewOutgoingStream(bool unidirectional) { + if 
(!http_connection_callbacks_ || unidirectional) { + return; + } + uint32_t streams_available = streamsAvailable(); + if (streams_available > 0) { + http_connection_callbacks_->onMaxStreamsChanged(streams_available); + } +} + std::unique_ptr EnvoyQuicClientSession::CreateClientStream() { ASSERT(codec_stats_.has_value() && http3_options_.has_value()); return std::make_unique(GetNextOutgoingBidirectionalStreamId(), this, @@ -110,9 +134,22 @@ quic::QuicConnection* EnvoyQuicClientSession::quicConnection() { return initialized_ ? connection() : nullptr; } +uint64_t EnvoyQuicClientSession::streamsAvailable() { + quic::QuicStreamIdManager& manager = quic::test::QuicSessionPeer::getStreamIdManager(this); + ASSERT(manager.outgoing_max_streams() >= manager.outgoing_stream_count()); + uint32_t streams_available = manager.outgoing_max_streams() - manager.outgoing_stream_count(); + return streams_available; +} + void EnvoyQuicClientSession::OnTlsHandshakeComplete() { quic::QuicSpdyClientSession::OnTlsHandshakeComplete(); - raiseConnectionEvent(Network::ConnectionEvent::Connected); + + // TODO(alyssawilk) support the case where a connection starts with 0 max streams. + ASSERT(streamsAvailable()); + if (streamsAvailable() > 0) { + OnCanCreateNewOutgoingStream(false); + raiseConnectionEvent(Network::ConnectionEvent::Connected); + } } std::unique_ptr EnvoyQuicClientSession::CreateQuicCryptoStream() { diff --git a/source/common/quic/envoy_quic_client_session.h b/source/common/quic/envoy_quic_client_session.h index 36ed64bec3f5..59beb7083532 100644 --- a/source/common/quic/envoy_quic_client_session.h +++ b/source/common/quic/envoy_quic_client_session.h @@ -11,6 +11,8 @@ namespace Envoy { namespace Quic { +class EnvoyQuicClientSession; + // Act as a Network::ClientConnection to ClientCodec. 
// TODO(danzh) This class doesn't need to inherit Network::FilterManager // interface but need all other Network::Connection implementation in @@ -58,6 +60,7 @@ class EnvoyQuicClientSession : public QuicFilterManagerConnectionImpl, void MaybeSendRstStreamFrame(quic::QuicStreamId id, quic::QuicResetStreamError error, quic::QuicStreamOffset bytes_written) override; void OnRstStream(const quic::QuicRstStreamFrame& frame) override; + // quic::QuicSpdyClientSessionBase bool ShouldKeepConnectionAlive() const override; // quic::ProofHandler @@ -73,6 +76,9 @@ class EnvoyQuicClientSession : public QuicFilterManagerConnectionImpl, // QuicFilterManagerConnectionImpl void setHttp3Options(const envoy::config::core::v3::Http3ProtocolOptions& http3_options) override; + // Notify any registered connection pool when new streams are available. + void OnCanCreateNewOutgoingStream(bool) override; + using quic::QuicSpdyClientSession::PerformActionOnActiveStreams; protected: @@ -95,11 +101,11 @@ class EnvoyQuicClientSession : public QuicFilterManagerConnectionImpl, quic::QuicConnection* quicConnection() override; private: + uint64_t streamsAvailable(); + // These callbacks are owned by network filters and quic session should outlive // them. Http::ConnectionCallbacks* http_connection_callbacks_{nullptr}; - // TODO(danzh) deprecate this field once server_id() is made const. 
- const std::string host_name_; std::shared_ptr crypto_config_; EnvoyQuicCryptoClientStreamFactoryInterface& crypto_stream_factory_; QuicStatNames& quic_stat_names_; diff --git a/source/common/quic/envoy_quic_client_stream.cc b/source/common/quic/envoy_quic_client_stream.cc index ad26cf9e17d7..222f35f56e78 100644 --- a/source/common/quic/envoy_quic_client_stream.cc +++ b/source/common/quic/envoy_quic_client_stream.cc @@ -286,7 +286,9 @@ void EnvoyQuicClientStream::ResetWithError(quic::QuicResetStreamError error) { stats_.tx_reset_.inc(); // Upper layers expect calling resetStream() to immediately raise reset callbacks. runResetCallbacks(quicRstErrorToEnvoyLocalResetReason(error.internal_code())); - quic::QuicSpdyClientStream::ResetWithError(error); + if (session()->connection()->connected()) { + quic::QuicSpdyClientStream::ResetWithError(error); + } } void EnvoyQuicClientStream::OnConnectionClosed(quic::QuicErrorCode error, diff --git a/source/common/quic/envoy_quic_utils.cc b/source/common/quic/envoy_quic_utils.cc index b49fefff5785..433d379580da 100644 --- a/source/common/quic/envoy_quic_utils.cc +++ b/source/common/quic/envoy_quic_utils.cc @@ -237,6 +237,14 @@ createServerConnectionSocket(Network::IoHandle& io_handle, return connection_socket; } +void convertQuicConfig(const envoy::config::core::v3::QuicProtocolOptions& config, + quic::QuicConfig& quic_config) { + int32_t max_streams = PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, max_concurrent_streams, 100); + quic_config.SetMaxBidirectionalStreamsToSend(max_streams); + quic_config.SetMaxUnidirectionalStreamsToSend(max_streams); + configQuicInitialFlowControlWindow(config, quic_config); +} + void configQuicInitialFlowControlWindow(const envoy::config::core::v3::QuicProtocolOptions& config, quic::QuicConfig& quic_config) { size_t stream_flow_control_window_to_send = PROTOBUF_GET_WRAPPED_OR_DEFAULT( diff --git a/source/common/quic/envoy_quic_utils.h b/source/common/quic/envoy_quic_utils.h index 
67ffd0dcf70c..ccef576914f9 100644 --- a/source/common/quic/envoy_quic_utils.h +++ b/source/common/quic/envoy_quic_utils.h @@ -178,6 +178,10 @@ createServerConnectionSocket(Network::IoHandle& io_handle, const quic::QuicSocketAddress& peer_address, const std::string& hostname, absl::string_view alpn); +// Alter QuicConfig based on all the options in the supplied config. +void convertQuicConfig(const envoy::config::core::v3::QuicProtocolOptions& config, + quic::QuicConfig& quic_config); + // Set initial flow control windows in quic_config according to the given Envoy config. void configQuicInitialFlowControlWindow(const envoy::config::core::v3::QuicProtocolOptions& config, quic::QuicConfig& quic_config); diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index ef480da22c0e..707299a14799 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -63,7 +63,6 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.fix_added_trailers", "envoy.reloadable_features.grpc_bridge_stats_disabled", "envoy.reloadable_features.grpc_web_fix_non_proto_encoded_response_handling", - "envoy.reloadable_features.grpc_json_transcoder_adhere_to_buffer_limits", "envoy.reloadable_features.hash_multiple_header_values", "envoy.reloadable_features.health_check.graceful_goaway_handling", "envoy.reloadable_features.http2_consume_stream_refused_errors", diff --git a/source/common/upstream/BUILD b/source/common/upstream/BUILD index 603d9aa68a4a..eb64222b561a 100644 --- a/source/common/upstream/BUILD +++ b/source/common/upstream/BUILD @@ -634,7 +634,7 @@ envoy_cc_library( "//source/common/network:resolver_lib", "//source/common/network:socket_option_factory_lib", "//source/common/network:utility_lib", - "//source/common/network/dns_resolver:dns_factory_lib", + "//source/common/network/dns_resolver:dns_factory_util_lib", "//source/common/protobuf", "//source/common/protobuf:utility_lib", 
"//source/server:transport_socket_config_lib", diff --git a/source/common/upstream/cluster_factory_impl.cc b/source/common/upstream/cluster_factory_impl.cc index 2ed161baca57..fdebb5066bc1 100644 --- a/source/common/upstream/cluster_factory_impl.cc +++ b/source/common/upstream/cluster_factory_impl.cc @@ -5,7 +5,7 @@ #include "source/common/http/utility.h" #include "source/common/network/address_impl.h" -#include "source/common/network/dns_resolver/dns_factory.h" +#include "source/common/network/dns_resolver/dns_factory_util.h" #include "source/common/network/resolver_impl.h" #include "source/common/network/socket_option_factory.h" #include "source/common/upstream/health_checker_impl.h" diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index 353b1d572f12..55f85f5416ed 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -836,72 +836,42 @@ ClusterInfoImpl::ClusterInfoImpl( "HttpProtocolOptions can be specified"); } - switch (config.lb_policy()) { - case envoy::config::cluster::v3::Cluster::ROUND_ROBIN: - lb_type_ = LoadBalancerType::RoundRobin; - break; - case envoy::config::cluster::v3::Cluster::LEAST_REQUEST: - lb_type_ = LoadBalancerType::LeastRequest; - break; - case envoy::config::cluster::v3::Cluster::RANDOM: - lb_type_ = LoadBalancerType::Random; - break; - case envoy::config::cluster::v3::Cluster::RING_HASH: - lb_type_ = LoadBalancerType::RingHash; - break; - case envoy::config::cluster::v3::Cluster::MAGLEV: - lb_type_ = LoadBalancerType::Maglev; - break; - case envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED: - if (config.has_lb_subset_config()) { - throw EnvoyException( - fmt::format("cluster: LB policy {} cannot be combined with lb_subset_config", - envoy::config::cluster::v3::Cluster::LbPolicy_Name(config.lb_policy()))); - } - - lb_type_ = LoadBalancerType::ClusterProvided; - break; - case envoy::config::cluster::v3::Cluster::LOAD_BALANCING_POLICY_CONFIG: { 
- if (config.has_lb_subset_config()) { - throw EnvoyException( - fmt::format("cluster: LB policy {} cannot be combined with lb_subset_config", - envoy::config::cluster::v3::Cluster::LbPolicy_Name(config.lb_policy()))); - } - - if (config.has_common_lb_config()) { - throw EnvoyException( - fmt::format("cluster: LB policy {} cannot be combined with common_lb_config", - envoy::config::cluster::v3::Cluster::LbPolicy_Name(config.lb_policy()))); - } - - if (!config.has_load_balancing_policy()) { - throw EnvoyException( - fmt::format("cluster: LB policy {} requires load_balancing_policy to be set", - envoy::config::cluster::v3::Cluster::LbPolicy_Name(config.lb_policy()))); - } - - for (const auto& policy : config.load_balancing_policy().policies()) { - TypedLoadBalancerFactory* factory = - Config::Utility::getAndCheckFactory( - policy.typed_extension_config(), /*is_optional=*/true); - if (factory != nullptr) { - load_balancing_policy_ = policy; - load_balancer_factory_ = factory; - break; + // If load_balancing_policy is set we will use it directly, ignoring lb_policy. 
+ if (config.has_load_balancing_policy()) { + configureLbPolicies(config); + } else { + switch (config.lb_policy()) { + case envoy::config::cluster::v3::Cluster::ROUND_ROBIN: + lb_type_ = LoadBalancerType::RoundRobin; + break; + case envoy::config::cluster::v3::Cluster::LEAST_REQUEST: + lb_type_ = LoadBalancerType::LeastRequest; + break; + case envoy::config::cluster::v3::Cluster::RANDOM: + lb_type_ = LoadBalancerType::Random; + break; + case envoy::config::cluster::v3::Cluster::RING_HASH: + lb_type_ = LoadBalancerType::RingHash; + break; + case envoy::config::cluster::v3::Cluster::MAGLEV: + lb_type_ = LoadBalancerType::Maglev; + break; + case envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED: + if (config.has_lb_subset_config()) { + throw EnvoyException( + fmt::format("cluster: LB policy {} cannot be combined with lb_subset_config", + envoy::config::cluster::v3::Cluster::LbPolicy_Name(config.lb_policy()))); } - } - if (load_balancer_factory_ == nullptr) { - throw EnvoyException(fmt::format( - "Didn't find a registered load balancer factory implementation for cluster: '{}'", - name_)); + lb_type_ = LoadBalancerType::ClusterProvided; + break; + case envoy::config::cluster::v3::Cluster::LOAD_BALANCING_POLICY_CONFIG: { + configureLbPolicies(config); + break; + } + default: + NOT_REACHED_GCOVR_EXCL_LINE; } - - lb_type_ = LoadBalancerType::LoadBalancingPolicyConfig; - break; - } - default: - NOT_REACHED_GCOVR_EXCL_LINE; } if (config.lb_subset_config().locality_weight_aware() && @@ -960,6 +930,45 @@ ClusterInfoImpl::ClusterInfoImpl( } } +// Configures the load balancer based on config.load_balancing_policy +void ClusterInfoImpl::configureLbPolicies(const envoy::config::cluster::v3::Cluster& config) { + if (config.has_lb_subset_config()) { + throw EnvoyException( + fmt::format("cluster: LB policy {} cannot be combined with lb_subset_config", + envoy::config::cluster::v3::Cluster::LbPolicy_Name(config.lb_policy()))); + } + + if (config.has_common_lb_config()) { + throw 
EnvoyException( + fmt::format("cluster: LB policy {} cannot be combined with common_lb_config", + envoy::config::cluster::v3::Cluster::LbPolicy_Name(config.lb_policy()))); + } + + if (!config.has_load_balancing_policy()) { + throw EnvoyException( + fmt::format("cluster: LB policy {} requires load_balancing_policy to be set", + envoy::config::cluster::v3::Cluster::LbPolicy_Name(config.lb_policy()))); + } + + for (const auto& policy : config.load_balancing_policy().policies()) { + TypedLoadBalancerFactory* factory = + Config::Utility::getAndCheckFactory( + policy.typed_extension_config(), /*is_optional=*/true); + if (factory != nullptr) { + load_balancing_policy_ = policy; + load_balancer_factory_ = factory; + break; + } + } + + if (load_balancer_factory_ == nullptr) { + throw EnvoyException(fmt::format( + "Didn't find a registered load balancer factory implementation for cluster: '{}'", name_)); + } + + lb_type_ = LoadBalancerType::LoadBalancingPolicyConfig; +} + ProtocolOptionsConfigConstSharedPtr ClusterInfoImpl::extensionProtocolOptions(const std::string& name) const { auto i = extension_protocol_options_.find(name); diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index 8c4c4d8661c6..56486741bcb1 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -634,6 +634,7 @@ class ClusterInfoImpl : public ClusterInfo, const envoy::config::core::v3::HttpProtocolOptions& commonHttpProtocolOptions() const override { return http_protocol_options_->common_http_protocol_options_; } + void configureLbPolicies(const envoy::config::cluster::v3::Cluster& config); ProtocolOptionsConfigConstSharedPtr extensionProtocolOptions(const std::string& name) const override; LoadBalancerType lbType() const override { return lb_type_; } diff --git a/source/extensions/common/dynamic_forward_proxy/BUILD b/source/extensions/common/dynamic_forward_proxy/BUILD index d8abc11812b0..ceb356d83150 100644 --- 
a/source/extensions/common/dynamic_forward_proxy/BUILD +++ b/source/extensions/common/dynamic_forward_proxy/BUILD @@ -49,7 +49,7 @@ envoy_cc_library( "//source/common/config:utility_lib", "//source/common/network:resolver_lib", "//source/common/network:utility_lib", - "//source/common/network/dns_resolver:dns_factory_lib", + "//source/common/network/dns_resolver:dns_factory_util_lib", "@envoy_api//envoy/extensions/common/dynamic_forward_proxy/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc index 05801becc104..82ce6a295779 100644 --- a/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc +++ b/source/extensions/common/dynamic_forward_proxy/dns_cache_impl.cc @@ -6,7 +6,7 @@ #include "source/common/common/stl_helpers.h" #include "source/common/config/utility.h" #include "source/common/http/utility.h" -#include "source/common/network/dns_resolver/dns_factory.h" +#include "source/common/network/dns_resolver/dns_factory_util.h" #include "source/common/network/resolver_impl.h" #include "source/common/network/utility.h" diff --git a/source/extensions/common/wasm/BUILD b/source/extensions/common/wasm/BUILD index bf5b6a7273fc..93f33d4a6570 100644 --- a/source/extensions/common/wasm/BUILD +++ b/source/extensions/common/wasm/BUILD @@ -82,7 +82,7 @@ envoy_cc_library( "//source/common/http:message_lib", "//source/common/http:utility_lib", "//source/common/tracing:http_tracer_lib", - "//source/common/network/dns_resolver:dns_factory_lib", + "//source/common/network/dns_resolver:dns_factory_util_lib", "//source/extensions/common/wasm/ext:declare_property_cc_proto", "//source/extensions/common/wasm/ext:envoy_null_vm_wasm_api", "//source/extensions/filters/common/expr:context_lib", diff --git a/source/extensions/common/wasm/wasm.cc b/source/extensions/common/wasm/wasm.cc index fbc06528dbfa..4fc6c6a36caf 100644 --- a/source/extensions/common/wasm/wasm.cc 
+++ b/source/extensions/common/wasm/wasm.cc @@ -6,7 +6,7 @@ #include "envoy/event/deferred_deletable.h" #include "source/common/common/logger.h" -#include "source/common/network/dns_resolver/dns_factory.h" +#include "source/common/network/dns_resolver/dns_factory_util.h" #include "source/extensions/common/wasm/plugin.h" #include "source/extensions/common/wasm/stats_handler.h" diff --git a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc index 32014e2ed3d8..3bb7f57c5637 100644 --- a/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc +++ b/source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc @@ -62,9 +62,6 @@ using RcDetails = ConstSingleton; namespace { -constexpr absl::string_view buffer_limits_runtime_feature = - "envoy.reloadable_features.grpc_json_transcoder_adhere_to_buffer_limits"; - const Http::LowerCaseString& trailerHeader() { CONSTRUCT_ON_FIRST_USE(Http::LowerCaseString, "trailer"); } @@ -893,10 +890,6 @@ bool JsonTranscoderFilter::maybeConvertGrpcStatus(Grpc::Status::GrpcStatus grpc_ } bool JsonTranscoderFilter::decoderBufferLimitReached(uint64_t buffer_length) { - if (!Runtime::runtimeFeatureEnabled(buffer_limits_runtime_feature)) { - return false; - } - if (buffer_length > decoder_callbacks_->decoderBufferLimit()) { ENVOY_LOG(debug, "Request rejected because the transcoder's internal buffer size exceeds the " @@ -915,10 +908,6 @@ bool JsonTranscoderFilter::decoderBufferLimitReached(uint64_t buffer_length) { } bool JsonTranscoderFilter::encoderBufferLimitReached(uint64_t buffer_length) { - if (!Runtime::runtimeFeatureEnabled(buffer_limits_runtime_feature)) { - return false; - } - if (buffer_length > encoder_callbacks_->encoderBufferLimit()) { ENVOY_LOG(debug, "Response not transcoded because the transcoder's internal buffer size exceeds the " diff --git 
a/source/extensions/filters/listener/http_inspector/http_inspector.cc b/source/extensions/filters/listener/http_inspector/http_inspector.cc index 59b28e1febcb..8f77cbcf771e 100644 --- a/source/extensions/filters/listener/http_inspector/http_inspector.cc +++ b/source/extensions/filters/listener/http_inspector/http_inspector.cc @@ -61,10 +61,6 @@ Network::FilterStatus Filter::onAccept(Network::ListenerFilterCallbacks& cb) { cb.dispatcher(), [this](uint32_t events) { ENVOY_LOG(trace, "http inspector event: {}", events); - // inspector is always peeking and can never determine EOF. - // Use this event type to avoid listener timeout on the OS supporting - // FileReadyType::Closed. - bool end_stream = events & Event::FileReadyType::Closed; const ParseState parse_state = onRead(); switch (parse_state) { @@ -78,19 +74,11 @@ Network::FilterStatus Filter::onAccept(Network::ListenerFilterCallbacks& cb) { cb_->continueFilterChain(true); break; case ParseState::Continue: - if (end_stream) { - // Parser fails to determine http but the end of stream is reached. Fallback to - // non-http. 
- done(false); - cb_->socket().ioHandle().resetFileEvents(); - cb_->continueFilterChain(true); - } // do nothing but wait for the next event break; } }, - Event::PlatformDefaultTriggerType, - Event::FileReadyType::Read | Event::FileReadyType::Closed); + Event::PlatformDefaultTriggerType, Event::FileReadyType::Read); return Network::FilterStatus::StopIteration; } NOT_REACHED_GCOVR_EXCL_LINE; @@ -107,6 +95,11 @@ ParseState Filter::onRead() { return ParseState::Error; } + // Remote closed + if (result.return_value_ == 0) { + return ParseState::Error; + } + const auto parse_state = parseHttpHeader(absl::string_view(reinterpret_cast(buf_), result.return_value_)); switch (parse_state) { diff --git a/source/extensions/filters/listener/tls_inspector/tls_inspector.cc b/source/extensions/filters/listener/tls_inspector/tls_inspector.cc index fb0f06e6c2d5..92a70d07a46f 100644 --- a/source/extensions/filters/listener/tls_inspector/tls_inspector.cc +++ b/source/extensions/filters/listener/tls_inspector/tls_inspector.cc @@ -93,12 +93,6 @@ Network::FilterStatus Filter::onAccept(Network::ListenerFilterCallbacks& cb) { socket.ioHandle().initializeFileEvent( cb.dispatcher(), [this](uint32_t events) { - if (events & Event::FileReadyType::Closed) { - config_->stats().connection_closed_.inc(); - done(false); - return; - } - ASSERT(events == Event::FileReadyType::Read); ParseState parse_state = onRead(); switch (parse_state) { @@ -113,8 +107,7 @@ Network::FilterStatus Filter::onAccept(Network::ListenerFilterCallbacks& cb) { break; } }, - Event::PlatformDefaultTriggerType, - Event::FileReadyType::Read | Event::FileReadyType::Closed); + Event::PlatformDefaultTriggerType, Event::FileReadyType::Read); return Network::FilterStatus::StopIteration; } NOT_REACHED_GCOVR_EXCL_LINE; @@ -176,6 +169,11 @@ ParseState Filter::onRead() { return ParseState::Error; } + if (result.return_value_ == 0) { + config_->stats().connection_closed_.inc(); + return ParseState::Error; + } + // Because we're doing a 
MSG_PEEK, data we've seen before gets returned every time, so // skip over what we've already processed. if (static_cast(result.return_value_) > read_) { diff --git a/source/extensions/filters/network/dubbo_proxy/active_message.cc b/source/extensions/filters/network/dubbo_proxy/active_message.cc index e828884ff26f..bcdc2e0233d9 100644 --- a/source/extensions/filters/network/dubbo_proxy/active_message.cc +++ b/source/extensions/filters/network/dubbo_proxy/active_message.cc @@ -183,7 +183,7 @@ void ActiveMessageEncoderFilter::continueEncoding() { ActiveMessage::ActiveMessage(ConnectionManager& parent) : parent_(parent), request_timer_(std::make_unique( parent_.stats().request_time_ms_, parent.timeSystem())), - request_id_(-1), stream_id_(parent.randomGenerator().random()), + stream_id_(parent.randomGenerator().random()), stream_info_(parent.timeSystem(), parent_.connection().connectionInfoProviderSharedPtr()), pending_stream_decoded_(false), local_response_sent_(false) { parent_.stats().request_active_.inc(); @@ -346,7 +346,6 @@ FilterStatus ActiveMessage::applyEncoderFilters(ActiveMessageEncoderFilter* filt void ActiveMessage::sendLocalReply(const DubboFilters::DirectResponse& response, bool end_stream) { ASSERT(metadata_); - metadata_->setRequestId(request_id_); parent_.sendLocalReply(*metadata_, response, end_stream); if (end_stream) { diff --git a/source/extensions/filters/network/dubbo_proxy/active_message.h b/source/extensions/filters/network/dubbo_proxy/active_message.h index 5e860be3ddc2..c310e85b8ab2 100644 --- a/source/extensions/filters/network/dubbo_proxy/active_message.h +++ b/source/extensions/filters/network/dubbo_proxy/active_message.h @@ -202,8 +202,6 @@ class ActiveMessage : public LinkedObject, std::list encoder_filters_; std::function encoder_filter_action_; - int32_t request_id_; - // This value is used in the calculation of the weighted cluster. 
uint64_t stream_id_; StreamInfo::StreamInfoImpl stream_info_; diff --git a/source/extensions/filters/udp/dns_filter/BUILD b/source/extensions/filters/udp/dns_filter/BUILD index dff84e520c8a..b6f2e60d1002 100644 --- a/source/extensions/filters/udp/dns_filter/BUILD +++ b/source/extensions/filters/udp/dns_filter/BUILD @@ -39,6 +39,7 @@ envoy_cc_library( "//source/common/config:datasource_lib", "//source/common/network:address_lib", "//source/common/network:utility_lib", + "//source/common/network/dns_resolver:dns_factory_util_lib", "//source/common/protobuf:message_validator_lib", "//source/common/runtime:runtime_lib", "//source/common/upstream:cluster_manager_lib", diff --git a/source/extensions/filters/udp/dns_filter/dns_filter.cc b/source/extensions/filters/udp/dns_filter/dns_filter.cc index 30991185aff9..f93343b4c7dd 100644 --- a/source/extensions/filters/udp/dns_filter/dns_filter.cc +++ b/source/extensions/filters/udp/dns_filter/dns_filter.cc @@ -5,7 +5,7 @@ #include "source/common/config/datasource.h" #include "source/common/network/address_impl.h" -#include "source/common/network/dns_resolver/dns_factory.h" +#include "source/common/network/dns_resolver/dns_factory_util.h" #include "source/common/protobuf/message_validator_impl.h" #include "source/extensions/filters/udp/dns_filter/dns_filter_utils.h" diff --git a/source/extensions/filters/udp/dns_filter/dns_filter_resolver.h b/source/extensions/filters/udp/dns_filter/dns_filter_resolver.h index 65dca46aa2f4..4dbd364b59b3 100644 --- a/source/extensions/filters/udp/dns_filter/dns_filter_resolver.h +++ b/source/extensions/filters/udp/dns_filter/dns_filter_resolver.h @@ -3,7 +3,7 @@ #include "envoy/event/dispatcher.h" #include "envoy/network/dns.h" -#include "source/common/network/dns_resolver/dns_factory.h" +#include "source/common/network/dns_resolver/dns_factory_util.h" #include "source/extensions/filters/udp/dns_filter/dns_parser.h" namespace Envoy { diff --git 
a/source/extensions/network/dns_resolver/apple/BUILD b/source/extensions/network/dns_resolver/apple/BUILD index c318b9d89b96..d8873ce661a6 100644 --- a/source/extensions/network/dns_resolver/apple/BUILD +++ b/source/extensions/network/dns_resolver/apple/BUILD @@ -29,7 +29,7 @@ envoy_cc_extension( "//source/common/common:linked_object", "//source/common/network:address_lib", "//source/common/network:utility_lib", - "//source/common/network/dns_resolver:dns_factory_lib", + "//source/common/network/dns_resolver:dns_factory_util_lib", "//source/common/singleton:threadsafe_singleton", ], ) diff --git a/source/extensions/network/dns_resolver/apple/apple_dns_impl.cc b/source/extensions/network/dns_resolver/apple/apple_dns_impl.cc index 72a2c408f2ec..052ec5ab42af 100644 --- a/source/extensions/network/dns_resolver/apple/apple_dns_impl.cc +++ b/source/extensions/network/dns_resolver/apple/apple_dns_impl.cc @@ -15,7 +15,7 @@ #include "source/common/common/assert.h" #include "source/common/common/fmt.h" #include "source/common/network/address_impl.h" -#include "source/common/network/dns_resolver/dns_factory.h" +#include "source/common/network/dns_resolver/dns_factory_util.h" #include "source/common/network/utility.h" #include "absl/strings/str_join.h" diff --git a/source/extensions/network/dns_resolver/cares/BUILD b/source/extensions/network/dns_resolver/cares/BUILD index 854cd71e65fd..c69e443e4084 100644 --- a/source/extensions/network/dns_resolver/cares/BUILD +++ b/source/extensions/network/dns_resolver/cares/BUILD @@ -23,6 +23,6 @@ envoy_cc_extension( "//source/common/network:address_lib", "//source/common/network:resolver_lib", "//source/common/network:utility_lib", - "//source/common/network/dns_resolver:dns_factory_lib", + "//source/common/network/dns_resolver:dns_factory_util_lib", ], ) diff --git a/source/extensions/network/dns_resolver/cares/dns_impl.cc b/source/extensions/network/dns_resolver/cares/dns_impl.cc index 58fb0d2be876..3ee95962f80e 100644 --- 
a/source/extensions/network/dns_resolver/cares/dns_impl.cc +++ b/source/extensions/network/dns_resolver/cares/dns_impl.cc @@ -13,7 +13,7 @@ #include "source/common/common/fmt.h" #include "source/common/common/thread.h" #include "source/common/network/address_impl.h" -#include "source/common/network/dns_resolver/dns_factory.h" +#include "source/common/network/dns_resolver/dns_factory_util.h" #include "source/common/network/resolver_impl.h" #include "source/common/network/utility.h" diff --git a/source/server/config_validation/server.h b/source/server/config_validation/server.h index bcde799b276e..b86959385c3c 100644 --- a/source/server/config_validation/server.h +++ b/source/server/config_validation/server.h @@ -16,7 +16,7 @@ #include "source/common/common/assert.h" #include "source/common/common/random_generator.h" #include "source/common/grpc/common.h" -#include "source/common/network/dns_resolver/dns_factory.h" +#include "source/common/network/dns_resolver/dns_factory_util.h" #include "source/common/protobuf/message_validator_impl.h" #include "source/common/quic/quic_stat_names.h" #include "source/common/router/context_impl.h" diff --git a/source/server/server.cc b/source/server/server.cc index 561a6162f933..6af174a4bd4e 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -38,7 +38,7 @@ #include "source/common/local_info/local_info_impl.h" #include "source/common/memory/stats.h" #include "source/common/network/address_impl.h" -#include "source/common/network/dns_resolver/dns_factory.h" +#include "source/common/network/dns_resolver/dns_factory_util.h" #include "source/common/network/socket_interface.h" #include "source/common/network/socket_interface_impl.h" #include "source/common/network/tcp_listener_impl.h" diff --git a/test/common/event/file_event_impl_test.cc b/test/common/event/file_event_impl_test.cc index 6bcd002d3c39..d3a842ada010 100644 --- a/test/common/event/file_event_impl_test.cc +++ b/test/common/event/file_event_impl_test.cc @@ 
-88,8 +88,6 @@ TEST_P(FileEventImplActivateTest, Activate) { EXPECT_CALL(read_event, ready()); ReadyWatcher write_event; EXPECT_CALL(write_event, ready()); - ReadyWatcher closed_event; - EXPECT_CALL(closed_event, ready()); const FileTriggerType trigger = Event::PlatformDefaultTriggerType; @@ -103,14 +101,10 @@ TEST_P(FileEventImplActivateTest, Activate) { if (events & FileReadyType::Write) { write_event.ready(); } - - if (events & FileReadyType::Closed) { - closed_event.ready(); - } }, - trigger, FileReadyType::Read | FileReadyType::Write | FileReadyType::Closed); + trigger, FileReadyType::Read | FileReadyType::Write); - file_event->activate(FileReadyType::Read | FileReadyType::Write | FileReadyType::Closed); + file_event->activate(FileReadyType::Read | FileReadyType::Write); dispatcher->run(Event::Dispatcher::RunType::NonBlock); os_sys_calls_.close(fd); @@ -125,7 +119,6 @@ TEST_P(FileEventImplActivateTest, ActivateChaining) { ReadyWatcher fd_event; ReadyWatcher read_event; ReadyWatcher write_event; - ReadyWatcher closed_event; ReadyWatcher prepare_watcher; evwatch_prepare_new(&static_cast(dispatcher.get())->base(), onWatcherReady, @@ -140,19 +133,13 @@ TEST_P(FileEventImplActivateTest, ActivateChaining) { if (events & FileReadyType::Read) { read_event.ready(); file_event->activate(FileReadyType::Write); - file_event->activate(FileReadyType::Closed); } if (events & FileReadyType::Write) { write_event.ready(); - file_event->activate(FileReadyType::Closed); - } - - if (events & FileReadyType::Closed) { - closed_event.ready(); } }, - trigger, FileReadyType::Read | FileReadyType::Write | FileReadyType::Closed); + trigger, FileReadyType::Read | FileReadyType::Write); testing::InSequence s; // First loop iteration: handle scheduled read event and the real write event produced by poll. 
@@ -166,13 +153,10 @@ TEST_P(FileEventImplActivateTest, ActivateChaining) { EXPECT_CALL(prepare_watcher, ready()); EXPECT_CALL(fd_event, ready()); EXPECT_CALL(write_event, ready()); - EXPECT_CALL(closed_event, ready()); - // Third loop iteration: handle close event scheduled while handling write. - EXPECT_CALL(prepare_watcher, ready()); - EXPECT_CALL(fd_event, ready()); - EXPECT_CALL(closed_event, ready()); - // Fourth loop iteration: poll returned no new real events. - EXPECT_CALL(prepare_watcher, ready()); + if constexpr (Event::PlatformDefaultTriggerType != Event::FileTriggerType::EmulatedEdge) { + // Third loop iteration: poll returned no new real events. + EXPECT_CALL(prepare_watcher, ready()); + } file_event->activate(FileReadyType::Read); dispatcher->run(Event::Dispatcher::RunType::NonBlock); @@ -189,7 +173,6 @@ TEST_P(FileEventImplActivateTest, SetEnableCancelsActivate) { ReadyWatcher fd_event; ReadyWatcher read_event; ReadyWatcher write_event; - ReadyWatcher closed_event; ReadyWatcher prepare_watcher; evwatch_prepare_new(&static_cast(dispatcher.get())->base(), onWatcherReady, @@ -210,12 +193,8 @@ TEST_P(FileEventImplActivateTest, SetEnableCancelsActivate) { if (events & FileReadyType::Write) { write_event.ready(); } - - if (events & FileReadyType::Closed) { - closed_event.ready(); - } }, - trigger, FileReadyType::Read | FileReadyType::Write | FileReadyType::Closed); + trigger, FileReadyType::Read | FileReadyType::Write); testing::InSequence s; // First loop iteration: handle scheduled read event and the real write event produced by poll. 
diff --git a/test/common/http/http2/codec_impl_test.cc b/test/common/http/http2/codec_impl_test.cc index 50d9b50802b3..a23b056b9a77 100644 --- a/test/common/http/http2/codec_impl_test.cc +++ b/test/common/http/http2/codec_impl_test.cc @@ -55,18 +55,6 @@ class Http2CodecImplTestFixture { static bool slowContainsStreamId(int id, ConnectionImpl& connection) { return connection.slowContainsStreamId(id); } - // The Http::Connection::dispatch method does not throw (any more). However unit tests in this - // file use codecs for sending test data through mock network connections to the codec under test. - // It is infeasible to plumb error codes returned by the dispatch() method of the codecs under - // test, through mock connections and sending codec. As a result error returned by the dispatch - // method of the codec under test invoked by the ConnectionWrapper is thrown as an exception. Note - // that exception goes only through the mock network connection and sending codec, i.e. it is - // thrown only through the test harness code. Specific exception types are to distinguish error - // codes returned when processing requests or responses. - // TODO(yanavlasov): modify the code to verify test expectations at the point of calling codec - // under test through the ON_CALL expectations in the - // setupDefaultConnectionMocks() method. This will make the exceptions below - // unnecessary. 
struct ClientCodecError : public std::runtime_error { ClientCodecError(Http::Status&& status) : std::runtime_error(std::string(status.message())), status_(std::move(status)) {} @@ -84,19 +72,19 @@ class Http2CodecImplTestFixture { struct ConnectionWrapper { Http::Status dispatch(const Buffer::Instance& data, ConnectionImpl& connection) { connection_ = &connection; - Http::Status status = Http::okStatus(); buffer_.add(data); return dispatchBufferedData(); } Http::Status dispatchBufferedData() { Http::Status status = Http::okStatus(); - if (!dispatching_) { + if (!dispatching_ && status_.ok()) { while (buffer_.length() > 0) { dispatching_ = true; status = connection_->dispatch(buffer_); if (!status.ok()) { - // Exit early if we hit an error status. + // Exit early if we hit an error status and record it for verification in the test. + status_.Update(status); return status; } dispatching_ = false; @@ -108,6 +96,7 @@ class Http2CodecImplTestFixture { bool dispatching_{}; Buffer::OwnedImpl buffer_; ConnectionImpl* connection_{}; + Http::Status status_; }; enum SettingsTupleIndex { @@ -168,17 +157,11 @@ class Http2CodecImplTestFixture { if (corrupt_metadata_frame_) { corruptMetadataFramePayload(data); } - auto status = server_wrapper_.dispatch(data, *server_); - if (!status.ok()) { - throw ServerCodecError(std::move(status)); - } + server_wrapper_.dispatch(data, *server_).IgnoreError(); })); ON_CALL(server_connection_, write(_, _)) .WillByDefault(Invoke([&](Buffer::Instance& data, bool) -> void { - auto status = client_wrapper_.dispatch(data, *client_); - if (!status.ok()) { - throw ClientCodecError(std::move(status)); - } + client_wrapper_.dispatch(data, *client_).IgnoreError(); })); } @@ -421,7 +404,9 @@ TEST_P(Http2CodecImplTest, TrailerStatus) { response_encoder_->encodeHeaders(response_headers, false); // nghttp2 doesn't allow :status in trailers - EXPECT_THROW(response_encoder_->encode100ContinueHeaders(continue_headers), ClientCodecError); + 
response_encoder_->encode100ContinueHeaders(continue_headers); + EXPECT_FALSE(client_wrapper_.status_.ok()); + EXPECT_TRUE(isCodecProtocolError(client_wrapper_.status_)); EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); }; @@ -471,7 +456,9 @@ TEST_P(Http2CodecImplTest, Invalid101SwitchingProtocols) { TestResponseHeaderMapImpl upgrade_headers{{":status", "101"}}; EXPECT_CALL(response_decoder_, decodeHeaders_(_, _)).Times(0); - EXPECT_THROW(response_encoder_->encodeHeaders(upgrade_headers, false), ClientCodecError); + response_encoder_->encodeHeaders(upgrade_headers, false); + EXPECT_FALSE(client_wrapper_.status_.ok()); + EXPECT_TRUE(isCodecProtocolError(client_wrapper_.status_)); EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); } @@ -484,7 +471,9 @@ TEST_P(Http2CodecImplTest, InvalidContinueWithFin) { EXPECT_TRUE(request_encoder_->encodeHeaders(request_headers, true).ok()); TestResponseHeaderMapImpl continue_headers{{":status", "100"}}; - EXPECT_THROW(response_encoder_->encodeHeaders(continue_headers, true), ClientCodecError); + response_encoder_->encodeHeaders(continue_headers, true); + EXPECT_FALSE(client_wrapper_.status_.ok()); + EXPECT_TRUE(isCodecProtocolError(client_wrapper_.status_)); EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); } @@ -553,7 +542,9 @@ TEST_P(Http2CodecImplTest, InvalidRepeatContinue) { EXPECT_CALL(response_decoder_, decode100ContinueHeaders_(_)); response_encoder_->encode100ContinueHeaders(continue_headers); - EXPECT_THROW(response_encoder_->encodeHeaders(continue_headers, true), ClientCodecError); + response_encoder_->encodeHeaders(continue_headers, true); + EXPECT_FALSE(client_wrapper_.status_.ok()); + EXPECT_TRUE(isCodecProtocolError(client_wrapper_.status_)); EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); }; @@ -611,7 +602,9 @@ TEST_P(Http2CodecImplTest, Invalid204WithContentLength) { "debug", "Invalid HTTP header 
field was received: frame type: 1, stream: 1, name: [content-length], " "value: [3]", - EXPECT_THROW(response_encoder_->encodeHeaders(response_headers, false), ClientCodecError)); + response_encoder_->encodeHeaders(response_headers, false)); + EXPECT_FALSE(client_wrapper_.status_.ok()); + EXPECT_TRUE(isCodecProtocolError(client_wrapper_.status_)); EXPECT_EQ(1, client_stats_store_.counter("http2.rx_messaging_error").value()); }; @@ -841,8 +834,11 @@ TEST_P(Http2CodecImplTest, BadMetadataVecReceivedTest) { metadata_map_vector.push_back(std::move(metadata_map_ptr)); corrupt_metadata_frame_ = true; - EXPECT_THROW_WITH_MESSAGE(request_encoder_->encodeMetadata(metadata_map_vector), ServerCodecError, - "The user callback function failed"); + request_encoder_->encodeMetadata(metadata_map_vector); + // The error is detected by the server codec. + EXPECT_FALSE(server_wrapper_.status_.ok()); + EXPECT_TRUE(isCodecProtocolError(server_wrapper_.status_)); + EXPECT_EQ(server_wrapper_.status_.message(), "The user callback function failed"); } // Encode response metadata while dispatching request data from the client, so @@ -1899,6 +1895,8 @@ TEST_P(Http2CodecImplStreamLimitTest, LazyDecreaseMaxConcurrentStreamsConsumeErr EXPECT_EQ(1, server_stats_store_.counter("http2.tx_reset").value()); EXPECT_EQ(1, TestUtility::findGauge(client_stats_store_, "http2.streams_active")->value()); EXPECT_EQ(1, TestUtility::findGauge(server_stats_store_, "http2.streams_active")->value()); + // The server codec should not fail since the error is "consumed". 
+ EXPECT_TRUE(server_wrapper_.status_.ok()); } TEST_P(Http2CodecImplStreamLimitTest, LazyDecreaseMaxConcurrentStreamsIgnoreError) { @@ -1937,14 +1935,17 @@ TEST_P(Http2CodecImplStreamLimitTest, LazyDecreaseMaxConcurrentStreamsIgnoreErro request_encoder_ = &client_->newStream(response_decoder_); setupDefaultConnectionMocks(); - EXPECT_THROW_WITH_MESSAGE(request_encoder_->encodeHeaders(request_headers, true).IgnoreError(), - ServerCodecError, "The user callback function failed"); + EXPECT_TRUE(request_encoder_->encodeHeaders(request_headers, true).ok()); + // The server codec should fail since there are no available streams. + EXPECT_FALSE(server_wrapper_.status_.ok()); + EXPECT_TRUE(isCodecProtocolError(server_wrapper_.status_)); + EXPECT_EQ(server_wrapper_.status_.message(), "The user callback function failed"); EXPECT_EQ(0, server_stats_store_.counter("http2.stream_refused_errors").value()); EXPECT_EQ(0, server_stats_store_.counter("http2.tx_reset").value()); // Not verifying the http2.streams_active server/client gauges here as the - // EXPECT_THROW_WITH_MESSAGE above doesn't let us fully capture the behavior of the real system. + // test dispatch function doesn't let us fully capture the behavior of the real system. // In the real world, the status returned from dispatch would trigger a connection close which // would result in the active stream gauges to go down to 0. } @@ -2437,8 +2438,11 @@ TEST_P(Http2CodecImplTest, PingFlood) { buffer.move(frame); })); - EXPECT_THROW_WITH_MESSAGE(client_->sendPendingFrames().IgnoreError(), ServerCodecError, - "Too many control frames in the outbound queue."); + client_->sendPendingFrames().IgnoreError(); + // The PING flood is detected by the server codec. 
+ EXPECT_FALSE(server_wrapper_.status_.ok()); + EXPECT_TRUE(isBufferFloodError(server_wrapper_.status_)); + EXPECT_EQ(server_wrapper_.status_.message(), "Too many control frames in the outbound queue."); EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_control_flood").value()); } @@ -2507,8 +2511,11 @@ TEST_P(Http2CodecImplTest, PingFloodCounterReset) { // 1 more ping frame should overflow the outbound frame limit. EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - EXPECT_THROW_WITH_MESSAGE(client_->sendPendingFrames().IgnoreError(), ServerCodecError, - "Too many control frames in the outbound queue."); + client_->sendPendingFrames().IgnoreError(); + // The server codec should fail when it gets 1 PING too many. + EXPECT_FALSE(server_wrapper_.status_.ok()); + EXPECT_TRUE(isBufferFloodError(server_wrapper_.status_)); + EXPECT_EQ(server_wrapper_.status_.message(), "Too many control frames in the outbound queue."); } // Verify that codec detects flood of outbound HEADER frames @@ -2678,8 +2685,11 @@ TEST_P(Http2CodecImplTest, PingStacksWithDataFlood) { } // Send one PING frame above the outbound queue size limit EXPECT_EQ(0, nghttp2_submit_ping(client_->session(), NGHTTP2_FLAG_NONE, nullptr)); - EXPECT_THROW_WITH_MESSAGE(client_->sendPendingFrames().IgnoreError(), ServerCodecError, - "Too many frames in the outbound queue."); + client_->sendPendingFrames().IgnoreError(); + // The server codec should fail when it gets 1 frame too many. 
+ EXPECT_FALSE(server_wrapper_.status_.ok()); + EXPECT_TRUE(isBufferFloodError(server_wrapper_.status_)); + EXPECT_EQ(server_wrapper_.status_.message(), "Too many frames in the outbound queue."); EXPECT_EQ(1, server_stats_store_.counter("http2.outbound_flood").value()); } @@ -2774,8 +2784,11 @@ TEST_P(Http2CodecImplTest, MetadataFlood) { TEST_P(Http2CodecImplTest, PriorityFlood) { priorityFlood(); - EXPECT_THROW_WITH_MESSAGE(client_->sendPendingFrames().IgnoreError(), ServerCodecError, - "Too many PRIORITY frames"); + client_->sendPendingFrames().IgnoreError(); + // The PRIORITY flood is detected by the server codec. + EXPECT_FALSE(server_wrapper_.status_.ok()); + EXPECT_TRUE(isBufferFloodError(server_wrapper_.status_)); + EXPECT_EQ(server_wrapper_.status_.message(), "Too many PRIORITY frames"); } TEST_P(Http2CodecImplTest, PriorityFloodOverride) { @@ -2787,8 +2800,11 @@ TEST_P(Http2CodecImplTest, PriorityFloodOverride) { TEST_P(Http2CodecImplTest, WindowUpdateFlood) { windowUpdateFlood(); - EXPECT_THROW_WITH_MESSAGE(client_->sendPendingFrames().IgnoreError(), ServerCodecError, - "Too many WINDOW_UPDATE frames"); + client_->sendPendingFrames().IgnoreError(); + // The server codec should fail when it gets 1 WINDOW_UPDATE frame too many. 
+ EXPECT_FALSE(server_wrapper_.status_.ok()); + EXPECT_TRUE(isBufferFloodError(server_wrapper_.status_)); + EXPECT_EQ(server_wrapper_.status_.message(), "Too many WINDOW_UPDATE frames"); } TEST_P(Http2CodecImplTest, WindowUpdateFloodOverride) { diff --git a/test/common/network/dns_resolver/BUILD b/test/common/network/dns_resolver/BUILD index 87f8ba300554..a357f6f20a83 100644 --- a/test/common/network/dns_resolver/BUILD +++ b/test/common/network/dns_resolver/BUILD @@ -16,7 +16,7 @@ envoy_cc_test( "--runtime-feature-disable-for-tests=envoy.restart_features.use_apple_api_for_dns_lookups", ], deps = [ - "//source/common/network/dns_resolver:dns_factory_lib", + "//source/common/network/dns_resolver:dns_factory_util_lib", "//source/extensions/network/dns_resolver/cares:config", "//test/mocks/network:network_mocks", ], diff --git a/test/common/network/dns_resolver/dns_factory_test.cc b/test/common/network/dns_resolver/dns_factory_test.cc index 45d959b87b80..813b64a52971 100644 --- a/test/common/network/dns_resolver/dns_factory_test.cc +++ b/test/common/network/dns_resolver/dns_factory_test.cc @@ -1,5 +1,5 @@ #include "source/common/network/address_impl.h" -#include "source/common/network/dns_resolver/dns_factory.h" +#include "source/common/network/dns_resolver/dns_factory_util.h" #include "test/mocks/network/mocks.h" diff --git a/test/common/quic/envoy_quic_utils_test.cc b/test/common/quic/envoy_quic_utils_test.cc index d8d290975f08..99794c49700d 100644 --- a/test/common/quic/envoy_quic_utils_test.cc +++ b/test/common/quic/envoy_quic_utils_test.cc @@ -171,5 +171,26 @@ TEST(EnvoyQuicUtilsTest, deduceSignatureAlgorithmFromNullPublicKey) { EXPECT_EQ("Invalid leaf cert, bad public key", error); } +TEST(EnvoyQuicUtilsTest, ConvertQuicConfig) { + envoy::config::core::v3::QuicProtocolOptions config; + quic::QuicConfig quic_config; + + // Test defaults. 
+ convertQuicConfig(config, quic_config); + EXPECT_EQ(100, quic_config.GetMaxBidirectionalStreamsToSend()); + EXPECT_EQ(100, quic_config.GetMaxUnidirectionalStreamsToSend()); + EXPECT_EQ(16777216, quic_config.GetInitialMaxStreamDataBytesIncomingBidirectionalToSend()); + EXPECT_EQ(25165824, quic_config.GetInitialSessionFlowControlWindowToSend()); + + // Test converting values. + config.mutable_max_concurrent_streams()->set_value(2); + config.mutable_initial_stream_window_size()->set_value(3); + config.mutable_initial_connection_window_size()->set_value(50); + convertQuicConfig(config, quic_config); + EXPECT_EQ(2, quic_config.GetMaxBidirectionalStreamsToSend()); + EXPECT_EQ(2, quic_config.GetMaxUnidirectionalStreamsToSend()); + EXPECT_EQ(3, quic_config.GetInitialMaxStreamDataBytesIncomingBidirectionalToSend()); +} + } // namespace Quic } // namespace Envoy diff --git a/test/common/upstream/BUILD b/test/common/upstream/BUILD index 13c4ea496b1f..436ff9945cb3 100644 --- a/test/common/upstream/BUILD +++ b/test/common/upstream/BUILD @@ -672,6 +672,8 @@ envoy_cc_test( "//test/mocks/upstream:cluster_manager_mocks", "//test/mocks/upstream:health_checker_mocks", "//test/mocks/upstream:priority_set_mocks", + "//test/mocks/upstream:thread_aware_load_balancer_mocks", + "//test/mocks/upstream:typed_load_balancer_factory_mocks", "//test/test_common:registry_lib", "//test/test_common:test_runtime_lib", "//test/test_common:utility_lib", diff --git a/test/common/upstream/upstream_impl_test.cc b/test/common/upstream/upstream_impl_test.cc index 82d281a1cd72..b177712ca4cc 100644 --- a/test/common/upstream/upstream_impl_test.cc +++ b/test/common/upstream/upstream_impl_test.cc @@ -39,6 +39,8 @@ #include "test/mocks/upstream/cluster_manager.h" #include "test/mocks/upstream/health_checker.h" #include "test/mocks/upstream/priority_set.h" +#include "test/mocks/upstream/thread_aware_load_balancer.h" +#include "test/mocks/upstream/typed_load_balancer_factory.h" #include 
"test/test_common/environment.h" #include "test/test_common/registry.h" #include "test/test_common/test_runtime.h" @@ -2079,6 +2081,138 @@ TEST_F(StaticClusterImplTest, UnsupportedLBType) { EnvoyException, "invalid value \"fakelbtype\""); } +// load_balancing_policy should be used when lb_policy is set to LOAD_BALANCING_POLICY_CONFIG. +TEST_F(StaticClusterImplTest, LoadBalancingPolicyWithLbPolicy) { + const std::string yaml = R"EOF( + name: staticcluster + connect_timeout: 0.25s + type: static + lb_policy: LOAD_BALANCING_POLICY_CONFIG + load_balancing_policy: + policies: + - typed_extension_config: + name: custom_lb + typed_config: + "@type": type.googleapis.com/google.protobuf.Struct + value: + foo: "bar" + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.0.0.1 + port_value: 11001 + )EOF"; + + NiceMock factory; + EXPECT_CALL(factory, name()).WillRepeatedly(Return("custom_lb")); + Registry::InjectFactory registered_factory(factory); + + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); + Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( + "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() + : cluster_config.alt_stat_name())); + Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, + singleton_manager_, tls_, validation_visitor_, *api_, options_); + StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), true); + cluster.initialize([] {}); + + EXPECT_EQ(1UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size()); + EXPECT_EQ(LoadBalancerType::LoadBalancingPolicyConfig, cluster.info()->lbType()); + EXPECT_TRUE(cluster.info()->addedViaApi()); +} + +// load_balancing_policy should also be used when lb_policy is set to something else besides +// LOAD_BALANCING_POLICY_CONFIG. 
+TEST_F(StaticClusterImplTest, LoadBalancingPolicyWithOtherLbPolicy) { + const std::string yaml = R"EOF( + name: staticcluster + connect_timeout: 0.25s + type: static + lb_policy: ROUND_ROBIN + load_balancing_policy: + policies: + - typed_extension_config: + name: custom_lb + typed_config: + "@type": type.googleapis.com/google.protobuf.Struct + value: + foo: "bar" + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.0.0.1 + port_value: 11001 + )EOF"; + + NiceMock factory; + EXPECT_CALL(factory, name()).WillRepeatedly(Return("custom_lb")); + Registry::InjectFactory registered_factory(factory); + + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); + Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( + "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() + : cluster_config.alt_stat_name())); + Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, + singleton_manager_, tls_, validation_visitor_, *api_, options_); + StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), true); + cluster.initialize([] {}); + + EXPECT_EQ(1UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size()); + EXPECT_EQ(LoadBalancerType::LoadBalancingPolicyConfig, cluster.info()->lbType()); + EXPECT_TRUE(cluster.info()->addedViaApi()); +} + +// load_balancing_policy should also be used when lb_policy is omitted. 
+TEST_F(StaticClusterImplTest, LoadBalancingPolicyWithoutLbPolicy) { + const std::string yaml = R"EOF( + name: staticcluster + connect_timeout: 0.25s + type: static + load_balancing_policy: + policies: + - typed_extension_config: + name: custom_lb + typed_config: + "@type": type.googleapis.com/google.protobuf.Struct + value: + foo: "bar" + load_assignment: + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 10.0.0.1 + port_value: 11001 + )EOF"; + + NiceMock factory; + EXPECT_CALL(factory, name()).WillRepeatedly(Return("custom_lb")); + Registry::InjectFactory registered_factory(factory); + + envoy::config::cluster::v3::Cluster cluster_config = parseClusterFromV3Yaml(yaml); + Envoy::Stats::ScopePtr scope = stats_.createScope(fmt::format( + "cluster.{}.", cluster_config.alt_stat_name().empty() ? cluster_config.name() + : cluster_config.alt_stat_name())); + Envoy::Server::Configuration::TransportSocketFactoryContextImpl factory_context( + admin_, ssl_context_manager_, *scope, cm_, local_info_, dispatcher_, stats_, + singleton_manager_, tls_, validation_visitor_, *api_, options_); + StaticClusterImpl cluster(cluster_config, runtime_, factory_context, std::move(scope), true); + cluster.initialize([] {}); + + EXPECT_EQ(1UL, cluster.prioritySet().hostSetsPerPriority()[0]->healthyHosts().size()); + EXPECT_EQ(LoadBalancerType::LoadBalancingPolicyConfig, cluster.info()->lbType()); + EXPECT_TRUE(cluster.info()->addedViaApi()); +} + TEST_F(StaticClusterImplTest, MalformedHostIP) { const std::string yaml = R"EOF( name: name diff --git a/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc b/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc index fccc8902d00c..474d74e96aa8 100644 --- a/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc +++ b/test/extensions/filters/http/grpc_json_transcoder/grpc_json_transcoder_integration_test.cc 
@@ -1321,72 +1321,5 @@ TEST_P(OverrideConfigGrpcJsonTranscoderIntegrationTest, RouteOverride) { R"({"shelves":[{"id":"20","theme":"Children"},{"id":"1","theme":"Foo"}]})"); }; -// Tests to ensure transcoding buffer limits do not apply when the runtime feature is disabled. -class BufferLimitsDisabledGrpcJsonTranscoderIntegrationTest - : public GrpcJsonTranscoderIntegrationTest { -public: - void SetUp() override { - setUpstreamProtocol(Http::CodecType::HTTP2); - const std::string filter = - R"EOF( - name: grpc_json_transcoder - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.grpc_json_transcoder.v3.GrpcJsonTranscoder - proto_descriptor : "{}" - services : "bookstore.Bookstore" - )EOF"; - config_helper_.prependFilter( - fmt::format(filter, TestEnvironment::runfilesPath("test/proto/bookstore.descriptor"))); - - // Disable runtime feature. - config_helper_.addRuntimeOverride( - "envoy.reloadable_features.grpc_json_transcoder_adhere_to_buffer_limits", "false"); - } -}; -INSTANTIATE_TEST_SUITE_P(IpVersions, BufferLimitsDisabledGrpcJsonTranscoderIntegrationTest, - testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), - TestUtility::ipTestParamsToString); - -TEST_P(BufferLimitsDisabledGrpcJsonTranscoderIntegrationTest, UnaryPostRequestExceedsBufferLimit) { - // Request body is more than 20 bytes. - config_helper_.setBufferLimits(2 << 20, 20); - HttpIntegrationTest::initialize(); - - // Transcoding succeeds. 
- testTranscoding( - Http::TestRequestHeaderMapImpl{{":method", "POST"}, - {":path", "/shelf"}, - {":authority", "host"}, - {"content-type", "application/json"}}, - R"({"theme": "Children 0123456789 0123456789 0123456789 0123456789"})", - {R"(shelf { theme: "Children 0123456789 0123456789 0123456789 0123456789" })"}, {R"(id: 1)"}, - Status(), - Http::TestResponseHeaderMapImpl{{":status", "200"}, - {"content-type", "application/json"}, - {"content-length", "10"}, - {"grpc-status", "0"}}, - R"({"id":"1"})"); -} - -TEST_P(BufferLimitsDisabledGrpcJsonTranscoderIntegrationTest, UnaryPostResponseExceedsBufferLimit) { - // Request body is less than 35 bytes. - // Response body is more than 35 bytes. - config_helper_.setBufferLimits(2 << 20, 35); - HttpIntegrationTest::initialize(); - - // Transcoding succeeds. However, the downstream client is unable to buffer the full response. - // We can tell these errors are NOT from the transcoder because the response body is too generic. - testTranscoding( - Http::TestRequestHeaderMapImpl{{":method", "POST"}, - {":path", "/shelf"}, - {":authority", "host"}, - {"content-type", "application/json"}}, - R"({"theme": "Children"})", {R"(shelf { theme: "Children" })"}, - {R"(id: 20 theme: "Children 0123456789 0123456789 0123456789 0123456789" )"}, Status(), - Http::TestResponseHeaderMapImpl{ - {":status", "500"}, {"content-type", "text/plain"}, {"content-length", "21"}}, - R"(Internal Server Error)"); -} - } // namespace } // namespace Envoy diff --git a/test/extensions/filters/listener/http_inspector/http_inspector_test.cc b/test/extensions/filters/listener/http_inspector/http_inspector_test.cc index 8499c8e3428d..f5e68eae1d9a 100644 --- a/test/extensions/filters/listener/http_inspector/http_inspector_test.cc +++ b/test/extensions/filters/listener/http_inspector/http_inspector_test.cc @@ -44,11 +44,10 @@ class HttpInspectorTest : public testing::Test { if (include_inline_recv) { EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)) - 
.WillOnce(Return(Api::SysCallSizeResult{static_cast(0), 0})); + .WillOnce(Return(Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_AGAIN})); - EXPECT_CALL(dispatcher_, - createFileEvent_(_, _, Event::PlatformDefaultTriggerType, - Event::FileReadyType::Read | Event::FileReadyType::Closed)) + EXPECT_CALL(dispatcher_, createFileEvent_(_, _, Event::PlatformDefaultTriggerType, + Event::FileReadyType::Read)) .WillOnce(DoAll(SaveArg<1>(&file_event_callback_), ReturnNew>())); @@ -334,11 +333,10 @@ TEST_F(HttpInspectorTest, InspectHttp2) { TEST_F(HttpInspectorTest, ReadClosed) { init(); - EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)); - EXPECT_CALL(socket_, close()); - EXPECT_CALL(cb_, continueFilterChain(true)); - socket_.close(); - file_event_callback_(Event::FileReadyType::Closed); + EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)) + .WillOnce(Return(Api::SysCallSizeResult{0, 0})); + EXPECT_CALL(cb_, continueFilterChain(false)); + file_event_callback_(Event::FileReadyType::Read); EXPECT_EQ(0, cfg_->stats().http2_found_.value()); } diff --git a/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc b/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc index 902855a7b4c8..0a9d074d7138 100644 --- a/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc +++ b/test/extensions/filters/listener/tls_inspector/tls_inspector_test.cc @@ -17,6 +17,7 @@ using testing::InSequence; using testing::Invoke; using testing::InvokeWithoutArgs; using testing::NiceMock; +using testing::Return; using testing::ReturnNew; using testing::ReturnRef; using testing::SaveArg; @@ -46,11 +47,10 @@ class TlsInspectorTest : public testing::TestWithParam Api::SysCallSizeResult { ENVOY_LOG_MISC(error, "In mock syscall recv {} {} {} {}", fd, buffer, length, flag); - return Api::SysCallSizeResult{static_cast(0), 0}; + return Api::SysCallSizeResult{ssize_t(-1), SOCKET_ERROR_AGAIN}; })); - EXPECT_CALL(dispatcher_, - createFileEvent_(_, _, 
Event::PlatformDefaultTriggerType, - Event::FileReadyType::Read | Event::FileReadyType::Closed)) + EXPECT_CALL(dispatcher_, createFileEvent_(_, _, Event::PlatformDefaultTriggerType, + Event::FileReadyType::Read)) .WillOnce( DoAll(SaveArg<1>(&file_event_callback_), ReturnNew>())); filter_->onAccept(cb_); @@ -85,8 +85,10 @@ TEST_P(TlsInspectorTest, MaxClientHelloSize) { // Test that the filter detects Closed events and terminates. TEST_P(TlsInspectorTest, ConnectionClosed) { init(); + EXPECT_CALL(os_sys_calls_, recv(42, _, _, MSG_PEEK)) + .WillOnce(Return(Api::SysCallSizeResult{0, 0})); EXPECT_CALL(cb_, continueFilterChain(false)); - file_event_callback_(Event::FileReadyType::Closed); + file_event_callback_(Event::FileReadyType::Read); EXPECT_EQ(1, cfg_->stats().connection_closed_.value()); } diff --git a/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc b/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc index 8cdbe6162927..d74e12fa8f77 100644 --- a/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc +++ b/test/extensions/filters/network/dubbo_proxy/conn_manager_test.cc @@ -832,7 +832,7 @@ TEST_F(ConnectionManagerTest, ResponseWithUnknownSequenceID) { TEST_F(ConnectionManagerTest, OnDataWithFilterSendsLocalReply) { initializeFilter(); - writeHessianRequestMessage(buffer_, false, false, 1); + writeHessianRequestMessage(buffer_, false, false, 233333); config_->setupFilterChain(2, 0); config_->expectOnDestroy(); @@ -847,8 +847,10 @@ TEST_F(ConnectionManagerTest, OnDataWithFilterSendsLocalReply) { const std::string fake_response("mock dubbo response"); NiceMock direct_response; EXPECT_CALL(direct_response, encode(_, _, _)) - .WillOnce(Invoke([&](MessageMetadata&, Protocol&, + .WillOnce(Invoke([&](MessageMetadata& metadata, Protocol&, Buffer::Instance& buffer) -> DubboFilters::DirectResponse::ResponseType { + // Validate request id. 
+ EXPECT_EQ(metadata.requestId(), 233333); buffer.add(fake_response); return DubboFilters::DirectResponse::ResponseType::SuccessReply; })); @@ -878,7 +880,7 @@ TEST_F(ConnectionManagerTest, OnDataWithFilterSendsLocalReply) { TEST_F(ConnectionManagerTest, OnDataWithFilterSendsLocalErrorReply) { initializeFilter(); - writeHessianRequestMessage(buffer_, false, false, 1); + writeHessianRequestMessage(buffer_, false, false, 233334); config_->setupFilterChain(2, 0); config_->expectOnDestroy(); @@ -893,8 +895,10 @@ TEST_F(ConnectionManagerTest, OnDataWithFilterSendsLocalErrorReply) { const std::string fake_response("mock dubbo response"); NiceMock direct_response; EXPECT_CALL(direct_response, encode(_, _, _)) - .WillOnce(Invoke([&](MessageMetadata&, Protocol&, + .WillOnce(Invoke([&](MessageMetadata& metadata, Protocol&, Buffer::Instance& buffer) -> DubboFilters::DirectResponse::ResponseType { + // Validate request id. + EXPECT_EQ(metadata.requestId(), 233334); buffer.add(fake_response); return DubboFilters::DirectResponse::ResponseType::ErrorReply; })); diff --git a/test/extensions/network/dns_resolver/apple/BUILD b/test/extensions/network/dns_resolver/apple/BUILD index 0e48f5fee940..d949920ad9d9 100644 --- a/test/extensions/network/dns_resolver/apple/BUILD +++ b/test/extensions/network/dns_resolver/apple/BUILD @@ -23,7 +23,7 @@ envoy_cc_test( "//source/common/stats:isolated_store_lib", "//source/common/event:dispatcher_lib", "//source/common/network:address_lib", - "//source/common/network/dns_resolver:dns_factory_lib", + "//source/common/network/dns_resolver:dns_factory_util_lib", "//source/common/common:random_generator_lib", "//test/test_common:environment_lib", "//test/test_common:network_utility_lib", diff --git a/test/extensions/network/dns_resolver/apple/apple_dns_impl_test.cc b/test/extensions/network/dns_resolver/apple/apple_dns_impl_test.cc index 2c70208fc518..7fb43d13947a 100644 --- a/test/extensions/network/dns_resolver/apple/apple_dns_impl_test.cc +++ 
b/test/extensions/network/dns_resolver/apple/apple_dns_impl_test.cc @@ -11,7 +11,7 @@ #include "envoy/network/dns.h" #include "source/common/network/address_impl.h" -#include "source/common/network/dns_resolver/dns_factory.h" +#include "source/common/network/dns_resolver/dns_factory_util.h" #include "source/common/network/utility.h" #include "source/common/stats/isolated_store_impl.h" #include "source/extensions/network/dns_resolver/apple/apple_dns_impl.h" diff --git a/test/integration/base_integration_test.h b/test/integration/base_integration_test.h index 303efca46b25..528b1c0cc2af 100644 --- a/test/integration/base_integration_test.h +++ b/test/integration/base_integration_test.h @@ -362,6 +362,9 @@ class BaseIntegrationTest : protected Logger::Loggable { void mergeOptions(envoy::config::core::v3::Http2ProtocolOptions& options) { upstream_config_.http2_options_.MergeFrom(options); } + void mergeOptions(envoy::config::listener::v3::QuicProtocolOptions& options) { + upstream_config_.quic_options_.MergeFrom(options); + } std::unique_ptr upstream_stats_store_; diff --git a/test/integration/fake_upstream.cc b/test/integration/fake_upstream.cc index 2129334241ae..2f33f3c3f519 100644 --- a/test/integration/fake_upstream.cc +++ b/test/integration/fake_upstream.cc @@ -510,7 +510,7 @@ FakeUpstream::FakeUpstream(Network::TransportSocketFactoryPtr&& transport_socket FakeUpstream::FakeUpstream(Network::TransportSocketFactoryPtr&& transport_socket_factory, Network::SocketPtr&& listen_socket, const FakeUpstreamConfig& config) : http_type_(config.upstream_protocol_), http2_options_(config.http2_options_), - http3_options_(config.http3_options_), + http3_options_(config.http3_options_), quic_options_(config.quic_options_), socket_(Network::SocketSharedPtr(listen_socket.release())), socket_factory_(std::make_unique(socket_)), api_(Api::createApiForTest(stats_store_)), time_system_(config.time_system_), diff --git a/test/integration/fake_upstream.h 
b/test/integration/fake_upstream.h index 83bbb6db124d..f667bcb2a225 100644 --- a/test/integration/fake_upstream.h +++ b/test/integration/fake_upstream.h @@ -579,6 +579,7 @@ struct FakeUpstreamConfig { absl::optional udp_fake_upstream_; envoy::config::core::v3::Http2ProtocolOptions http2_options_; envoy::config::core::v3::Http3ProtocolOptions http3_options_; + envoy::config::listener::v3::QuicProtocolOptions quic_options_; uint32_t max_request_headers_kb_ = Http::DEFAULT_MAX_REQUEST_HEADERS_KB; uint32_t max_request_headers_count_ = Http::DEFAULT_MAX_HEADERS_COUNT; envoy::config::core::v3::HttpProtocolOptions::HeadersWithUnderscoresAction @@ -760,7 +761,7 @@ class FakeUpstream : Logger::Loggable, if (is_quic) { #if defined(ENVOY_ENABLE_QUIC) udp_listener_config_.listener_factory_ = std::make_unique( - envoy::config::listener::v3::QuicProtocolOptions(), 1, parent_.quic_stat_names_); + parent_.quic_options_, 1, parent_.quic_stat_names_); // Initialize QUICHE flags. quiche::FlagRegistry::getInstance(); #else @@ -823,6 +824,7 @@ class FakeUpstream : Logger::Loggable, const envoy::config::core::v3::Http2ProtocolOptions http2_options_; const envoy::config::core::v3::Http3ProtocolOptions http3_options_; + envoy::config::listener::v3::QuicProtocolOptions quic_options_; Network::SocketSharedPtr socket_; Network::ListenSocketFactoryPtr socket_factory_; ConditionalInitializer server_initialized_; diff --git a/test/integration/multiplexed_upstream_integration_test.cc b/test/integration/multiplexed_upstream_integration_test.cc index d47abf5f0ea2..4acf747d777c 100644 --- a/test/integration/multiplexed_upstream_integration_test.cc +++ b/test/integration/multiplexed_upstream_integration_test.cc @@ -205,7 +205,6 @@ TEST_P(Http2UpstreamIntegrationTest, LargeSimultaneousRequestWithBufferLimits) { void Http2UpstreamIntegrationTest::manySimultaneousRequests(uint32_t request_bytes, uint32_t max_response_bytes, uint32_t num_requests) { - autonomous_allow_incomplete_streams_ = true; 
TestRandomGenerator rand; std::vector encoders; std::vector responses; @@ -236,11 +235,8 @@ void Http2UpstreamIntegrationTest::manySimultaneousRequests(uint32_t request_byt ASSERT_TRUE(responses[i]->waitForEndStream()); if (i % 2 != 0) { EXPECT_TRUE(responses[i]->complete()); - // TODO(18160) remove this if and always check for 200 and body length. - if (num_requests <= 100 || upstreamProtocol() != Http::CodecType::HTTP3) { - EXPECT_EQ("200", responses[i]->headers().getStatusValue()); - EXPECT_EQ(response_bytes[i], responses[i]->body().length()); - } + EXPECT_EQ("200", responses[i]->headers().getStatusValue()); + EXPECT_EQ(response_bytes[i], responses[i]->body().length()); } else { // Upstream stream reset. EXPECT_EQ("503", responses[i]->headers().getStatusValue()); @@ -255,13 +251,23 @@ TEST_P(Http2UpstreamIntegrationTest, ManySimultaneousRequest) { manySimultaneousRequests(1024, 1024, 100); } -#ifdef NDEBUG -// TODO(alyssawilk) this causes crashes in debug mode for QUIC due to a race -// condition between Envoy's stream accounting and QUICE's. Debug and fix. TEST_P(Http2UpstreamIntegrationTest, TooManySimultaneousRequests) { manySimultaneousRequests(1024, 1024, 200); } -#endif + +TEST_P(Http2UpstreamIntegrationTest, ManySimultaneousRequestsTightUpstreamLimits) { + if (upstreamProtocol() == Http::CodecType::HTTP2) { + return; + } + envoy::config::core::v3::Http2ProtocolOptions config; + config.mutable_max_concurrent_streams()->set_value(1); + mergeOptions(config); + envoy::config::listener::v3::QuicProtocolOptions options; + options.mutable_quic_protocol_options()->mutable_max_concurrent_streams()->set_value(1); + mergeOptions(options); + + manySimultaneousRequests(1024, 1024, 10); +} TEST_P(Http2UpstreamIntegrationTest, ManyLargeSimultaneousRequestWithBufferLimits) { config_helper_.setBufferLimits(1024, 1024); // Set buffer limits upstream and downstream. 
diff --git a/test/integration/tcp_tunneling_integration_test.cc b/test/integration/tcp_tunneling_integration_test.cc index ea3d79afd589..b98c29034882 100644 --- a/test/integration/tcp_tunneling_integration_test.cc +++ b/test/integration/tcp_tunneling_integration_test.cc @@ -488,39 +488,83 @@ class TcpTunnelingIntegrationTest : public HttpProtocolIntegrationTest { }); HttpProtocolIntegrationTest::SetUp(); } + + void setUpConnection(FakeHttpConnectionPtr& fake_upstream_connection) { + // Start a connection, and verify the upgrade headers are received upstream. + tcp_client_ = makeTcpConnection(lookupPort("tcp_proxy")); + if (!fake_upstream_connection) { + ASSERT_TRUE( + fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection)); + } + ASSERT_TRUE(fake_upstream_connection->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + + // Send upgrade headers downstream, fully establishing the connection. + upstream_request_->encodeHeaders(default_response_headers_, false); + } + + void sendBidiData(FakeHttpConnectionPtr& fake_upstream_connection, bool send_goaway = false) { + // Send some data from downstream to upstream, and make sure it goes through. + ASSERT_TRUE(tcp_client_->write("hello", false)); + ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); + + if (send_goaway) { + fake_upstream_connection->encodeGoAway(); + } + // Send data from upstream to downstream. + upstream_request_->encodeData(12, false); + ASSERT_TRUE(tcp_client_->waitForData(12)); + } + + void closeConnection(FakeHttpConnectionPtr& fake_upstream_connection) { + // Now send more data and close the TCP client. This should be treated as half close, so the + // data should go through. 
+ ASSERT_TRUE(tcp_client_->write("hello", false)); + tcp_client_->close(); + ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); + if (upstreamProtocol() == Http::CodecType::HTTP1) { + ASSERT_TRUE(fake_upstream_connection->waitForDisconnect()); + } else { + ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); + // If the upstream now sends 'end stream' the connection is fully closed. + upstream_request_->encodeData(0, true); + } + } + + IntegrationTcpClientPtr tcp_client_; }; TEST_P(TcpTunnelingIntegrationTest, Basic) { initialize(); - // Start a connection, and verify the upgrade headers are received upstream. - IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); - ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); - ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); - ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); - - // Send upgrade headers downstream, fully establishing the connection. - upstream_request_->encodeHeaders(default_response_headers_, false); + setUpConnection(fake_upstream_connection_); + sendBidiData(fake_upstream_connection_); + closeConnection(fake_upstream_connection_); +} - // Send some data from downstream to upstream, and make sure it goes through. - ASSERT_TRUE(tcp_client->write("hello", false)); - ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); +TEST_P(TcpTunnelingIntegrationTest, SendDataUpstreamAfterUpstreamClose) { + if (upstreamProtocol() == Http::CodecType::HTTP1) { + // HTTP/1.1 can't frame with FIN bits. + return; + } + initialize(); - // Send data from upstream to downstream. - upstream_request_->encodeData(12, false); - ASSERT_TRUE(tcp_client->waitForData(12)); + setUpConnection(fake_upstream_connection_); + sendBidiData(fake_upstream_connection_); + // Close upstream. 
+ upstream_request_->encodeData(2, true); + tcp_client_->waitForHalfClose(); - // Now send more data and close the TCP client. This should be treated as half close, so the data - // should go through. - ASSERT_TRUE(tcp_client->write("hello", false)); - tcp_client->close(); + // Now send data upstream. + ASSERT_TRUE(tcp_client_->write("hello", false)); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); + + // Finally close and clean up. + tcp_client_->close(); if (upstreamProtocol() == Http::CodecType::HTTP1) { ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); } else { ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); - // If the upstream now sends 'end stream' the connection is fully closed. - upstream_request_->encodeData(0, true); } } @@ -548,7 +592,7 @@ TEST_P(TcpTunnelingIntegrationTest, BasicUsePost) { initialize(); // Start a connection, and verify the upgrade headers are received upstream. - IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); + tcp_client_ = makeTcpConnection(lookupPort("tcp_proxy")); ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); @@ -557,33 +601,39 @@ TEST_P(TcpTunnelingIntegrationTest, BasicUsePost) { // Send upgrade headers downstream, fully establishing the connection. upstream_request_->encodeHeaders(default_response_headers_, false); - // Send some data from downstream to upstream, and make sure it goes through. - ASSERT_TRUE(tcp_client->write("hello", false)); - ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); - - // Send data from upstream to downstream. 
- upstream_request_->encodeData(12, false); - ASSERT_TRUE(tcp_client->waitForData(12)); + sendBidiData(fake_upstream_connection_); + closeConnection(fake_upstream_connection_); +} - // Now send more data and close the TCP client. This should be treated as half close, so the data - // should go through. - ASSERT_TRUE(tcp_client->write("hello", false)); - tcp_client->close(); - ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); +TEST_P(TcpTunnelingIntegrationTest, Goaway) { if (upstreamProtocol() == Http::CodecType::HTTP1) { - ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); - } else { - ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); - // If the upstream now sends 'end stream' the connection is fully closed. - upstream_request_->encodeData(0, true); + return; } + initialize(); + + // Send bidirectional data, including a goaway. + // This should result in the first connection being torn down. + setUpConnection(fake_upstream_connection_); + sendBidiData(fake_upstream_connection_, true); + closeConnection(fake_upstream_connection_); + test_server_->waitForCounterGe("cluster.cluster_0.upstream_cx_destroy", 1); + + // Make sure a subsequent connection can be established successfully. + FakeHttpConnectionPtr fake_upstream_connection; + setUpConnection(fake_upstream_connection); + sendBidiData(fake_upstream_connection); + closeConnection(fake_upstream_connection_); + + // Make sure the last stream is finished before doing test teardown. + fake_upstream_connection->encodeGoAway(); + test_server_->waitForCounterGe("cluster.cluster_0.upstream_cx_destroy", 2); } TEST_P(TcpTunnelingIntegrationTest, InvalidResponseHeaders) { initialize(); // Start a connection, and verify the upgrade headers are received upstream. 
- IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); + tcp_client_ = makeTcpConnection(lookupPort("tcp_proxy")); ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); @@ -600,23 +650,15 @@ TEST_P(TcpTunnelingIntegrationTest, InvalidResponseHeaders) { // The connection should be fully closed, but the client has no way of knowing // that. Ensure the FIN is read and clean up state. - tcp_client->waitForHalfClose(); - tcp_client->close(); + tcp_client_->waitForHalfClose(); + tcp_client_->close(); } TEST_P(TcpTunnelingIntegrationTest, CloseUpstreamFirst) { initialize(); - // Establish a connection. - IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); - ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); - ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); - ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); - upstream_request_->encodeHeaders(default_response_headers_, false); - - // Send data in both directions. - ASSERT_TRUE(tcp_client->write("hello", false)); - ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); + setUpConnection(fake_upstream_connection_); + sendBidiData(fake_upstream_connection_); // Send data from upstream to downstream with an end stream and make sure the data is received // before the connection is half-closed. 
@@ -624,19 +666,19 @@ TEST_P(TcpTunnelingIntegrationTest, CloseUpstreamFirst) { if (upstreamProtocol() == Http::CodecType::HTTP1) { ASSERT_TRUE(fake_upstream_connection_->close()); } - ASSERT_TRUE(tcp_client->waitForData(12)); - tcp_client->waitForHalfClose(); + ASSERT_TRUE(tcp_client_->waitForData(12)); + tcp_client_->waitForHalfClose(); if (upstreamProtocol() == Http::CodecType::HTTP1) { ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); - tcp_client->close(); + tcp_client_->close(); } else { // Attempt to send data upstream. // should go through. - ASSERT_TRUE(tcp_client->write("hello", false)); + ASSERT_TRUE(tcp_client_->write("hello", false)); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); - ASSERT_TRUE(tcp_client->write("hello", true)); + ASSERT_TRUE(tcp_client_->write("hello", true)); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); } @@ -649,16 +691,11 @@ TEST_P(TcpTunnelingIntegrationTest, ResetStreamTest) { enableHalfClose(false); initialize(); - // Establish a connection. - IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); - ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); - ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); - ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); - upstream_request_->encodeHeaders(default_response_headers_, false); + setUpConnection(fake_upstream_connection_); // Reset the stream. 
upstream_request_->encodeResetStream(); - tcp_client->waitForDisconnect(); + tcp_client_->waitForDisconnect(); } TEST_P(TcpTunnelingIntegrationTest, TestIdletimeoutWithLargeOutstandingData) { @@ -681,20 +718,16 @@ TEST_P(TcpTunnelingIntegrationTest, TestIdletimeoutWithLargeOutstandingData) { initialize(); - IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); - ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); - ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); - ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); - upstream_request_->encodeHeaders(default_response_headers_, false); + setUpConnection(fake_upstream_connection_); std::string data(1024 * 16, 'a'); - ASSERT_TRUE(tcp_client->write(data)); + ASSERT_TRUE(tcp_client_->write(data)); upstream_request_->encodeData(data, false); - tcp_client->waitForDisconnect(); + tcp_client_->waitForDisconnect(); if (upstreamProtocol() == Http::CodecType::HTTP1) { ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); - tcp_client->close(); + tcp_client_->close(); } else { ASSERT_TRUE(upstream_request_->waitForReset()); } @@ -707,36 +740,32 @@ TEST_P(TcpTunnelingIntegrationTest, TcpProxyDownstreamFlush) { config_helper_.setBufferLimits(size / 4, size / 4); initialize(); - std::string data(size, 'a'); - IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); - ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); - ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); - ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); - upstream_request_->encodeHeaders(default_response_headers_, false); + setUpConnection(fake_upstream_connection_); - tcp_client->readDisable(true); + tcp_client_->readDisable(true); + std::string data(size, 'a'); if (upstreamProtocol() == Http::CodecType::HTTP1) { - 
ASSERT_TRUE(tcp_client->write("hello", false)); + ASSERT_TRUE(tcp_client_->write("hello", false)); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); upstream_request_->encodeData(data, true); ASSERT_TRUE(fake_upstream_connection_->close()); } else { - ASSERT_TRUE(tcp_client->write("", true)); + ASSERT_TRUE(tcp_client_->write("", true)); // This ensures that readDisable(true) has been run on its thread - // before tcp_client starts writing. + // before tcp_client_ starts writing. ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); upstream_request_->encodeData(data, true); } test_server_->waitForCounterGe("cluster.cluster_0.upstream_flow_control_paused_reading_total", 1); - tcp_client->readDisable(false); - tcp_client->waitForData(data); - tcp_client->waitForHalfClose(); + tcp_client_->readDisable(false); + tcp_client_->waitForData(data); + tcp_client_->waitForHalfClose(); if (upstreamProtocol() == Http::CodecType::HTTP1) { - tcp_client->close(); + tcp_client_->close(); } } @@ -754,22 +783,19 @@ TEST_P(TcpTunnelingIntegrationTest, TcpProxyUpstreamFlush) { config_helper_.setBufferLimits(size, size); initialize(); - std::string data(size, 'a'); - IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); - ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); - ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); - ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); - upstream_request_->encodeHeaders(default_response_headers_, false); + setUpConnection(fake_upstream_connection_); + upstream_request_->readDisable(true); upstream_request_->encodeData("hello", false); // This ensures that fake_upstream_connection->readDisable has been run on its thread - // before tcp_client starts writing. - ASSERT_TRUE(tcp_client->waitForData(5)); + // before tcp_client_ starts writing. 
+ ASSERT_TRUE(tcp_client_->waitForData(5)); - ASSERT_TRUE(tcp_client->write(data, true)); + std::string data(size, 'a'); + ASSERT_TRUE(tcp_client_->write(data, true)); if (upstreamProtocol() == Http::CodecType::HTTP1) { - tcp_client->close(); + tcp_client_->close(); upstream_request_->readDisable(false); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, size)); @@ -782,7 +808,7 @@ TEST_P(TcpTunnelingIntegrationTest, TcpProxyUpstreamFlush) { ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, size)); ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); upstream_request_->encodeData("world", true); - tcp_client->waitForHalfClose(); + tcp_client_->waitForHalfClose(); } } @@ -793,42 +819,37 @@ TEST_P(TcpTunnelingIntegrationTest, ConnectionReuse) { } initialize(); - // Establish a connection. - IntegrationTcpClientPtr tcp_client1 = makeTcpConnection(lookupPort("tcp_proxy")); - ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); - ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); - ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); - upstream_request_->encodeHeaders(default_response_headers_, false); + setUpConnection(fake_upstream_connection_); // Send data in both directions. - ASSERT_TRUE(tcp_client1->write("hello1", false)); + ASSERT_TRUE(tcp_client_->write("hello1", false)); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, "hello1")); // Send data from upstream to downstream with an end stream and make sure the data is received // before the connection is half-closed. upstream_request_->encodeData("world1", true); - tcp_client1->waitForData("world1"); - tcp_client1->waitForHalfClose(); - tcp_client1->close(); + tcp_client_->waitForData("world1"); + tcp_client_->waitForHalfClose(); + tcp_client_->close(); ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); // Establish a new connection. 
- IntegrationTcpClientPtr tcp_client2 = makeTcpConnection(lookupPort("tcp_proxy")); + IntegrationTcpClientPtr tcp_client_2 = makeTcpConnection(lookupPort("tcp_proxy")); // The new CONNECT stream is established in the existing h2 connection. ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); upstream_request_->encodeHeaders(default_response_headers_, false); - ASSERT_TRUE(tcp_client2->write("hello2", false)); + ASSERT_TRUE(tcp_client_2->write("hello2", false)); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, "hello2")); // Send data from upstream to downstream with an end stream and make sure the data is received // before the connection is half-closed. upstream_request_->encodeData("world2", true); - tcp_client2->waitForData("world2"); - tcp_client2->waitForHalfClose(); - tcp_client2->close(); + tcp_client_2->waitForData("world2"); + tcp_client_2->waitForHalfClose(); + tcp_client_2->close(); ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); } @@ -839,36 +860,31 @@ TEST_P(TcpTunnelingIntegrationTest, H1NoConnectionReuse) { } initialize(); - // Establish a connection. - IntegrationTcpClientPtr tcp_client1 = makeTcpConnection(lookupPort("tcp_proxy")); - ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); - ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); - ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); - upstream_request_->encodeHeaders(default_response_headers_, false); + setUpConnection(fake_upstream_connection_); // Send data in both directions. - ASSERT_TRUE(tcp_client1->write("hello1", false)); + ASSERT_TRUE(tcp_client_->write("hello1", false)); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, "hello1")); // Send data from upstream to downstream and close the connection // from downstream. 
upstream_request_->encodeData("world1", false); - tcp_client1->waitForData("world1"); - tcp_client1->close(); + tcp_client_->waitForData("world1"); + tcp_client_->close(); ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); // Establish a new connection. - IntegrationTcpClientPtr tcp_client2 = makeTcpConnection(lookupPort("tcp_proxy")); + IntegrationTcpClientPtr tcp_client_2 = makeTcpConnection(lookupPort("tcp_proxy")); // A new connection is established ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); upstream_request_->encodeHeaders(default_response_headers_, false); - ASSERT_TRUE(tcp_client2->write("hello1", false)); + ASSERT_TRUE(tcp_client_2->write("hello1", false)); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, "hello1")); - tcp_client2->close(); + tcp_client_2->close(); ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); } @@ -881,41 +897,41 @@ TEST_P(TcpTunnelingIntegrationTest, H1UpstreamCloseNoConnectionReuse) { initialize(); // Establish a connection. - IntegrationTcpClientPtr tcp_client1 = makeTcpConnection(lookupPort("tcp_proxy")); + IntegrationTcpClientPtr tcp_client_1 = makeTcpConnection(lookupPort("tcp_proxy")); ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); upstream_request_->encodeHeaders(default_response_headers_, false); // Send data in both directions. - ASSERT_TRUE(tcp_client1->write("hello1", false)); + ASSERT_TRUE(tcp_client_1->write("hello1", false)); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, "hello1")); // Send data from upstream to downstream and close the connection // from the upstream. 
upstream_request_->encodeData("world1", false); - tcp_client1->waitForData("world1"); + tcp_client_1->waitForData("world1"); ASSERT_TRUE(fake_upstream_connection_->close()); ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); - tcp_client1->waitForHalfClose(); - tcp_client1->close(); + tcp_client_1->waitForHalfClose(); + tcp_client_1->close(); // Establish a new connection. - IntegrationTcpClientPtr tcp_client2 = makeTcpConnection(lookupPort("tcp_proxy")); + IntegrationTcpClientPtr tcp_client_2 = makeTcpConnection(lookupPort("tcp_proxy")); // A new connection is established ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); upstream_request_->encodeHeaders(default_response_headers_, false); - ASSERT_TRUE(tcp_client2->write("hello2", false)); + ASSERT_TRUE(tcp_client_2->write("hello2", false)); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, "hello2")); ASSERT_TRUE(fake_upstream_connection_->close()); ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); - tcp_client2->waitForHalfClose(); - tcp_client2->close(); + tcp_client_2->waitForHalfClose(); + tcp_client_2->close(); } TEST_P(TcpTunnelingIntegrationTest, 2xxStatusCodeValidHttp1) { @@ -925,7 +941,7 @@ TEST_P(TcpTunnelingIntegrationTest, 2xxStatusCodeValidHttp1) { initialize(); // Start a connection, and verify the upgrade headers are received upstream. 
- IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); + tcp_client_ = makeTcpConnection(lookupPort("tcp_proxy")); ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); @@ -935,16 +951,10 @@ TEST_P(TcpTunnelingIntegrationTest, 2xxStatusCodeValidHttp1) { default_response_headers_.setStatus(enumToInt(Http::Code::Accepted)); upstream_request_->encodeHeaders(default_response_headers_, false); - // Send some data from downstream to upstream, and make sure it goes through. - ASSERT_TRUE(tcp_client->write("hello", false)); - ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); - - // Send data from upstream to downstream. - upstream_request_->encodeData(12, false); - ASSERT_TRUE(tcp_client->waitForData(12)); + sendBidiData(fake_upstream_connection_); // Close the downstream connection and wait for upstream disconnect - tcp_client->close(); + tcp_client_->close(); ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); } @@ -955,7 +965,7 @@ TEST_P(TcpTunnelingIntegrationTest, ContentLengthHeaderIgnoredHttp1) { initialize(); // Start a connection, and verify the upgrade headers are received upstream. - IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); + tcp_client_ = makeTcpConnection(lookupPort("tcp_proxy")); ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); @@ -968,11 +978,11 @@ TEST_P(TcpTunnelingIntegrationTest, ContentLengthHeaderIgnoredHttp1) { // Send data from upstream to downstream. 
upstream_request_->encodeData(12, false); - ASSERT_TRUE(tcp_client->waitForData(12)); + ASSERT_TRUE(tcp_client_->waitForData(12)); // Now send some data and close the TCP client. - ASSERT_TRUE(tcp_client->write("hello", false)); - tcp_client->close(); + ASSERT_TRUE(tcp_client_->write("hello", false)); + tcp_client_->close(); ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); } @@ -984,7 +994,7 @@ TEST_P(TcpTunnelingIntegrationTest, TransferEncodingHeaderIgnoredHttp1) { initialize(); // Start a connection, and verify the upgrade headers are received upstream. - IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); + tcp_client_ = makeTcpConnection(lookupPort("tcp_proxy")); // Using raw connection to be able to set Transfer-encoding header. FakeRawConnectionPtr fake_upstream_connection; ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); @@ -998,24 +1008,24 @@ TEST_P(TcpTunnelingIntegrationTest, TransferEncodingHeaderIgnoredHttp1) { fake_upstream_connection->write("HTTP/1.1 200 OK\r\nTransfer-encoding: chunked\r\n\r\n")); // Now send some data and close the TCP client. - ASSERT_TRUE(tcp_client->write("hello")); + ASSERT_TRUE(tcp_client_->write("hello")); ASSERT_TRUE( fake_upstream_connection->waitForData(FakeRawConnection::waitForInexactMatch("hello"))); // Close connections. ASSERT_TRUE(fake_upstream_connection->close()); ASSERT_TRUE(fake_upstream_connection->waitForDisconnect()); - tcp_client->close(); + tcp_client_->close(); } TEST_P(TcpTunnelingIntegrationTest, DeferTransmitDataUntilSuccessConnectResponseIsReceived) { initialize(); // Start a connection, and verify the upgrade headers are received upstream. - IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); + tcp_client_ = makeTcpConnection(lookupPort("tcp_proxy")); // Send some data straight away. 
- ASSERT_TRUE(tcp_client->write("hello", false)); + ASSERT_TRUE(tcp_client_->write("hello", false)); ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); @@ -1028,7 +1038,7 @@ TEST_P(TcpTunnelingIntegrationTest, DeferTransmitDataUntilSuccessConnectResponse ASSERT_TRUE(upstream_request_->waitForData(*dispatcher_, 5)); - tcp_client->close(); + tcp_client_->close(); if (upstreamProtocol() == Http::CodecType::HTTP1) { ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); } else { @@ -1042,10 +1052,10 @@ TEST_P(TcpTunnelingIntegrationTest, NoDataTransmittedIfConnectFailureResponseIsR initialize(); // Start a connection, and verify the upgrade headers are received upstream. - IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); + tcp_client_ = makeTcpConnection(lookupPort("tcp_proxy")); // Send some data straight away. - ASSERT_TRUE(tcp_client->write("hello", false)); + ASSERT_TRUE(tcp_client_->write("hello", false)); ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); @@ -1057,7 +1067,7 @@ TEST_P(TcpTunnelingIntegrationTest, NoDataTransmittedIfConnectFailureResponseIsR // Wait a bit, no data should go through. ASSERT_FALSE(upstream_request_->waitForData(*dispatcher_, 1, std::chrono::milliseconds(100))); - tcp_client->close(); + tcp_client_->close(); if (upstreamProtocol() == Http::CodecType::HTTP1) { ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); } else { @@ -1069,15 +1079,15 @@ TEST_P(TcpTunnelingIntegrationTest, UpstreamDisconnectBeforeResponseReceived) { initialize(); // Start a connection, and verify the upgrade headers are received upstream. 
- IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("tcp_proxy")); + tcp_client_ = makeTcpConnection(lookupPort("tcp_proxy")); ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); ASSERT_TRUE(fake_upstream_connection_->close()); - tcp_client->waitForHalfClose(); - tcp_client->close(); + tcp_client_->waitForHalfClose(); + tcp_client_->close(); } INSTANTIATE_TEST_SUITE_P(IpAndHttpVersions, TcpTunnelingIntegrationTest, diff --git a/test/mocks/network/BUILD b/test/mocks/network/BUILD index 483043b645ed..304cb8d8c44f 100644 --- a/test/mocks/network/BUILD +++ b/test/mocks/network/BUILD @@ -59,7 +59,7 @@ envoy_cc_mock( "//source/common/network:address_lib", "//source/common/network:socket_interface_lib", "//source/common/network:utility_lib", - "//source/common/network/dns_resolver:dns_factory_lib", + "//source/common/network/dns_resolver:dns_factory_util_lib", "//source/common/stats:isolated_store_lib", "//test/mocks/event:event_mocks", "//test/mocks/stream_info:stream_info_mocks", diff --git a/test/mocks/network/mocks.h b/test/mocks/network/mocks.h index e12ffbc89b89..820322ff59b0 100644 --- a/test/mocks/network/mocks.h +++ b/test/mocks/network/mocks.h @@ -18,7 +18,7 @@ #include "envoy/network/transport_socket.h" #include "envoy/stats/scope.h" -#include "source/common/network/dns_resolver/dns_factory.h" +#include "source/common/network/dns_resolver/dns_factory_util.h" #include "source/common/network/filter_manager_impl.h" #include "source/common/network/socket_interface.h" #include "source/common/network/socket_interface_impl.h" diff --git a/test/mocks/upstream/BUILD b/test/mocks/upstream/BUILD index e6c41713f545..85eadc255f91 100644 --- a/test/mocks/upstream/BUILD +++ b/test/mocks/upstream/BUILD @@ -93,6 +93,7 @@ envoy_cc_mock( ":thread_aware_load_balancer_mocks", 
":thread_local_cluster_mocks", ":transport_socket_match_mocks", + ":typed_load_balancer_factory_mocks", "//envoy/http:async_client_interface", "//envoy/upstream:cluster_factory_interface", "//envoy/upstream:cluster_manager_interface", @@ -204,6 +205,15 @@ envoy_cc_mock( ], ) +envoy_cc_mock( + name = "typed_load_balancer_factory_mocks", + srcs = ["typed_load_balancer_factory.cc"], + hdrs = ["typed_load_balancer_factory.h"], + deps = [ + "//envoy/upstream:load_balancer_interface", + ], +) + envoy_cc_mock( name = "thread_local_cluster_mocks", srcs = ["thread_local_cluster.cc"], diff --git a/test/mocks/upstream/typed_load_balancer_factory.cc b/test/mocks/upstream/typed_load_balancer_factory.cc new file mode 100644 index 000000000000..7fff528012b0 --- /dev/null +++ b/test/mocks/upstream/typed_load_balancer_factory.cc @@ -0,0 +1,10 @@ +#include "typed_load_balancer_factory.h" + +namespace Envoy { +namespace Upstream { +MockTypedLoadBalancerFactory::MockTypedLoadBalancerFactory() = default; + +MockTypedLoadBalancerFactory::~MockTypedLoadBalancerFactory() = default; + +} // namespace Upstream +} // namespace Envoy diff --git a/test/mocks/upstream/typed_load_balancer_factory.h b/test/mocks/upstream/typed_load_balancer_factory.h new file mode 100644 index 000000000000..33f6849597af --- /dev/null +++ b/test/mocks/upstream/typed_load_balancer_factory.h @@ -0,0 +1,23 @@ +#pragma once + +#include "envoy/upstream/load_balancer.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Upstream { +class MockTypedLoadBalancerFactory : public TypedLoadBalancerFactory { +public: + MockTypedLoadBalancerFactory(); + ~MockTypedLoadBalancerFactory() override; + + // Upstream::TypedLoadBalancerFactory + MOCK_METHOD(std::string, name, (), (const)); + MOCK_METHOD(ThreadAwareLoadBalancerPtr, create, + (const PrioritySet& priority_set, ClusterStats& stats, Stats::Scope& stats_scope, + Runtime::Loader& runtime, Random::RandomGenerator& random, + const 
::envoy::config::cluster::v3::LoadBalancingPolicy_Policy& lb_policy)); +}; +} // namespace Upstream +} // namespace Envoy diff --git a/test/per_file_coverage.sh b/test/per_file_coverage.sh index cfb8bed725f5..1a50e20be0ca 100755 --- a/test/per_file_coverage.sh +++ b/test/per_file_coverage.sh @@ -16,7 +16,7 @@ declare -a KNOWN_LOW_COVERAGE=( "source/common/json:90.1" "source/common/matcher:94.0" "source/common/network:94.4" # Flaky, `activateFileEvents`, `startSecureTransport` and `ioctl`, listener_socket do not always report LCOV -"source/common/network/dns_resolver:90.8" # A few lines of MacOS code not tested in linux scripts. Tested in MacOS scripts +"source/common/network/dns_resolver:90.7" # A few lines of MacOS code not tested in linux scripts. Tested in MacOS scripts "source/common/protobuf:95.3" "source/common/quic:91.8" "source/common/router:96.5" @@ -45,10 +45,10 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/filters/http/kill_request:95.3" # Death tests don't report LCOV "source/extensions/filters/http/lua:96.4" "source/extensions/filters/http/wasm:95.8" -"source/extensions/filters/listener:96.2" -"source/extensions/filters/listener/http_inspector:95.9" +"source/extensions/filters/listener:95.9" +"source/extensions/filters/listener/http_inspector:95.8" "source/extensions/filters/listener/original_dst:93.3" -"source/extensions/filters/listener/tls_inspector:93.5" +"source/extensions/filters/listener/tls_inspector:92.3" "source/extensions/filters/network/common:96.0" "source/extensions/filters/network/common/redis:96.2" "source/extensions/filters/network/mongo_proxy:95.5" diff --git a/tools/dependency/cve_scan.py b/tools/dependency/cve_scan.py index e1d048ed93dc..af4e70aa7cd0 100755 --- a/tools/dependency/cve_scan.py +++ b/tools/dependency/cve_scan.py @@ -72,14 +72,25 @@ # See https://nvd.nist.gov/vuln/detail/CVE-2021-22940 'CVE-2021-22918', 'CVE-2021-22921', + 'CVE-2021-22930', 'CVE-2021-22931', 'CVE-2021-22939', 'CVE-2021-22940', - # This cve only 
affects versions of kafka < 2.8.1, but scanner - # does not support version matching atm. - # Tracking issue to fix versioning: - # https://github.com/envoyproxy/envoy/issues/18354 + # + # Currently, cvescan does not respect/understand versions (see #18354). + # + # The following CVEs target versions that are not currently used in the Envoy repo. + # + # libcurl + "CVE-2021-22945", + # + # kafka 'CVE-2021-38153', + # + # wasmtime + "CVE-2021-39216", + "CVE-2021-39218", + "CVE-2021-39219", ]) # Subset of CVE fields that are useful below.